file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
switch.go | package output
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/batch"
"github.com/Jeffail/benthos/v3/internal/bloblang/mapping"
"github.com/Jeffail/benthos/v3/internal/component/output"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
imessage "github.com/Jeffail/benthos/v3/internal/message"
"github.com/Jeffail/benthos/v3/internal/shutdown"
"github.com/Jeffail/benthos/v3/lib/condition"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/response"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/util/throttle"
"github.com/Jeffail/gabs/v2"
"golang.org/x/sync/errgroup"
)
//------------------------------------------------------------------------------
var (
// ErrSwitchNoConditionMet is returned when a message does not match any
// output conditions.
ErrSwitchNoConditionMet = errors.New("no switch output conditions were met by message")
// ErrSwitchNoCasesMatched is returned when a message does not match any
// output cases.
ErrSwitchNoCasesMatched = errors.New("no switch cases were matched by message")
// ErrSwitchNoOutputs is returned when creating a Switch type with less than
// 2 outputs.
ErrSwitchNoOutputs = errors.New("attempting to create switch with fewer than 2 cases")
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeSwitch] = TypeSpec{
constructor: fromSimpleConstructor(NewSwitch),
Summary: `
The switch output type allows you to route messages to different outputs based on their contents.`,
Description: `
Messages must successfully route to one or more outputs, otherwise this is considered an error and the message is reprocessed. In order to explicitly drop messages that do not match your cases add one final case with a [drop output](/docs/components/outputs/drop).`,
config: docs.FieldComponent().WithChildren(
docs.FieldCommon(
"retry_until_success", `
If a selected output fails to send a message this field determines whether it is
reattempted indefinitely. If set to false the error is instead propagated back
to the input level.
If a message can be routed to >1 outputs it is usually best to set this to true
in order to avoid duplicate messages being routed to an output.`,
),
docs.FieldAdvanced(
"strict_mode", `
This field determines whether an error should be reported if no condition is met.
If set to true, an error is propagated back to the input level. The default
behavior is false, which will drop the message.`,
),
docs.FieldAdvanced(
"max_in_flight", "The maximum number of parallel message batches to have in flight at any given time. Note that if a child output has a higher `max_in_flight` then the switch output will automatically match it, therefore this value is the minimum `max_in_flight` to set in cases where the child values can't be inferred (such as when using resource outputs as children).",
),
docs.FieldCommon(
"cases",
"A list of switch cases, outlining outputs that can be routed to.",
[]interface{}{
map[string]interface{}{
"check": `this.urls.contains("http://benthos.dev")`,
"output": map[string]interface{}{
"cache": map[string]interface{}{
"target": "foo",
"key": "${!json(\"id\")}",
},
},
"continue": true,
},
map[string]interface{}{
"output": map[string]interface{}{
"s3": map[string]interface{}{
"bucket": "bar",
"path": "${!json(\"id\")}",
},
},
},
},
).Array().WithChildren(
docs.FieldBloblang(
"check",
"A [Bloblang query](/docs/guides/bloblang/about/) that should return a boolean value indicating whether a message should be routed to the case output. If left empty the case always passes.",
`this.type == "foo"`,
`this.contents.urls.contains("https://benthos.dev/")`,
).HasDefault(""),
docs.FieldCommon(
"output", "An [output](/docs/components/outputs/about/) for messages that pass the check to be routed to.",
).HasDefault(map[string]interface{}{}).HasType(docs.FieldTypeOutput),
docs.FieldAdvanced(
"continue",
"Indicates whether, if this case passes for a message, the next case should also be tested.",
).HasDefault(false).HasType(docs.FieldTypeBool),
),
docs.FieldDeprecated("outputs").Array().WithChildren(
docs.FieldDeprecated("condition").HasType(docs.FieldTypeCondition),
docs.FieldDeprecated("fallthrough"),
docs.FieldDeprecated("output").HasType(docs.FieldTypeOutput),
).OmitWhen(func(v, _ interface{}) (string, bool) {
arr, ok := v.([]interface{})
return "field outputs is deprecated in favour of cases", ok && len(arr) == 0
}),
).Linter(func(ctx docs.LintContext, line, col int, value interface{}) []docs.Lint {
if _, ok := value.(map[string]interface{}); !ok {
return nil
}
gObj := gabs.Wrap(value)
retry, exists := gObj.S("retry_until_success").Data().(bool)
// TODO: V4 Is retry_until_success going to be false by default now?
if exists && !retry {
return nil
}
for _, cObj := range gObj.S("cases").Children() {
typeStr, _ := cObj.S("output", "type").Data().(string)
isReject := cObj.Exists("output", "reject")
if typeStr == "reject" || isReject {
return []docs.Lint{
docs.NewLintError(line, "a `switch` output with a `reject` case output must have the field `switch.retry_until_success` set to `false` (defaults to `true`), otherwise the `reject` child output will result in infinite retries"),
}
}
}
return nil
}),
Categories: []Category{
CategoryUtility,
},
Examples: []docs.AnnotatedExample{
{
Title: "Basic Multiplexing",
Summary: `
The most common use for a switch output is to multiplex messages across a range of output destinations. The following config checks the contents of the field ` + "`type` of messages and sends `foo` type messages to an `amqp_1` output, `bar` type messages to a `gcp_pubsub` output, and everything else to a `redis_streams` output" + `.
Outputs can have their own processors associated with them, and in this example the ` + "`redis_streams`" + ` output has a processor that enforces the presence of a type field before sending it.`,
Config: `
output:
switch:
cases:
- check: this.type == "foo"
output:
amqp_1:
url: amqps://guest:guest@localhost:5672/
target_address: queue:/the_foos
- check: this.type == "bar"
output:
gcp_pubsub:
project: dealing_with_mike
topic: mikes_bars
- output:
redis_streams:
url: tcp://localhost:6379
stream: everything_else
processors:
- bloblang: |
root = this
root.type = this.type | "unknown"
`,
},
{
Title: "Control Flow",
Summary: `
The ` + "`continue`" + ` field allows messages that have passed a case to be tested against the next one also. This can be useful when combining non-mutually-exclusive case checks.
In the following example a message that passes both the check of the first case as well as the second will be routed to both.`,
Config: `
output:
switch:
cases:
- check: 'this.user.interests.contains("walks").catch(false)'
output:
amqp_1:
url: amqps://guest:guest@localhost:5672/
target_address: queue:/people_what_think_good
continue: true
- check: 'this.user.dislikes.contains("videogames").catch(false)'
output:
gcp_pubsub:
project: people
topic: that_i_dont_want_to_hang_with
`,
},
},
}
}
//------------------------------------------------------------------------------
// SwitchConfig contains configuration fields for the Switch output type.
type SwitchConfig struct {
RetryUntilSuccess bool `json:"retry_until_success" yaml:"retry_until_success"`
StrictMode bool `json:"strict_mode" yaml:"strict_mode"`
MaxInFlight int `json:"max_in_flight" yaml:"max_in_flight"`
Cases []SwitchConfigCase `json:"cases" yaml:"cases"`
Outputs []SwitchConfigOutput `json:"outputs" yaml:"outputs"`
}
// NewSwitchConfig creates a new SwitchConfig with default values.
func NewSwitchConfig() SwitchConfig {
return SwitchConfig{
RetryUntilSuccess: true,
// TODO: V4 consider making this true by default.
StrictMode: false,
MaxInFlight: 1,
Cases: []SwitchConfigCase{},
Outputs: []SwitchConfigOutput{},
}
}
// SwitchConfigCase contains configuration fields per output of a switch type.
type SwitchConfigCase struct {
Check string `json:"check" yaml:"check"`
Continue bool `json:"continue" yaml:"continue"`
Output Config `json:"output" yaml:"output"`
}
// NewSwitchConfigCase creates a new switch output config with default values.
func NewSwitchConfigCase() SwitchConfigCase {
return SwitchConfigCase{
Check: "",
Continue: false,
Output: NewConfig(),
}
}
//------------------------------------------------------------------------------
// Switch is a broker that implements types.Consumer and broadcasts each message
// out to an array of outputs.
type Switch struct {
logger log.Modular
stats metrics.Type
mMsgRcvd metrics.StatCounter
mMsgSnt metrics.StatCounter
mOutputErr metrics.StatCounter
maxInFlight int
transactions <-chan types.Transaction
retryUntilSuccess bool
strictMode bool
outputTSChans []chan types.Transaction
outputs []types.Output
checks []*mapping.Executor
conditions []types.Condition
continues []bool
fallthroughs []bool
ctx context.Context
close func()
closedChan chan struct{}
}
// NewSwitch creates a new Switch type by providing outputs. Messages will be
// sent to a subset of outputs according to condition and fallthrough settings.
func NewSwitch(
conf Config,
mgr types.Manager,
logger log.Modular,
stats metrics.Type,
) (Type, error) {
ctx, done := context.WithCancel(context.Background())
o := &Switch{
stats: stats,
logger: logger,
maxInFlight: conf.Switch.MaxInFlight,
transactions: nil,
retryUntilSuccess: conf.Switch.RetryUntilSuccess,
strictMode: conf.Switch.StrictMode,
closedChan: make(chan struct{}),
ctx: ctx,
close: done,
mMsgRcvd: stats.GetCounter("switch.messages.received"),
mMsgSnt: stats.GetCounter("switch.messages.sent"),
mOutputErr: stats.GetCounter("switch.output.error"),
}
lCases := len(conf.Switch.Cases)
lOutputs := len(conf.Switch.Outputs)
if lCases < 2 && lOutputs < 2 {
return nil, ErrSwitchNoOutputs
}
if lCases > 0 {
if lOutputs > 0 {
return nil, errors.New("combining switch cases with deprecated outputs is not supported")
}
o.outputs = make([]types.Output, lCases)
o.checks = make([]*mapping.Executor, lCases)
o.continues = make([]bool, lCases)
o.fallthroughs = make([]bool, lCases)
} else {
o.outputs = make([]types.Output, lOutputs)
o.conditions = make([]types.Condition, lOutputs)
o.fallthroughs = make([]bool, lOutputs)
}
var err error
for i, oConf := range conf.Switch.Outputs {
ns := fmt.Sprintf("switch.%v", i)
oMgr, oLog, oStats := interop.LabelChild(ns+".output", mgr, logger, stats)
oStats = metrics.Combine(stats, oStats)
if o.outputs[i], err = New(oConf.Output, oMgr, oLog, oStats); err != nil {
return nil, fmt.Errorf("failed to create output '%v' type '%v': %v", i, oConf.Output.Type, err)
}
cMgr, cLog, cStats := interop.LabelChild(ns+".condition", mgr, logger, stats)
if o.conditions[i], err = condition.New(oConf.Condition, cMgr, cLog, cStats); err != nil {
return nil, fmt.Errorf("failed to create output '%v' condition '%v': %v", i, oConf.Condition.Type, err)
}
o.fallthroughs[i] = oConf.Fallthrough
}
for i, cConf := range conf.Switch.Cases {
oMgr, oLog, oStats := interop.LabelChild(fmt.Sprintf("switch.%v.output", i), mgr, logger, stats)
oStats = metrics.Combine(stats, oStats)
if o.outputs[i], err = New(cConf.Output, oMgr, oLog, oStats); err != nil {
return nil, fmt.Errorf("failed to create case '%v' output type '%v': %v", i, cConf.Output.Type, err) | }
o.continues[i] = cConf.Continue
}
o.outputTSChans = make([]chan types.Transaction, len(o.outputs))
for i := range o.outputTSChans {
if mif, ok := output.GetMaxInFlight(o.outputs[i]); ok && mif > o.maxInFlight {
o.maxInFlight = mif
}
o.outputTSChans[i] = make(chan types.Transaction)
if err := o.outputs[i].Consume(o.outputTSChans[i]); err != nil {
return nil, err
}
}
return o, nil
}
//------------------------------------------------------------------------------
// Consume assigns a new transactions channel for the broker to read.
func (o *Switch) Consume(transactions <-chan types.Transaction) error {
if o.transactions != nil {
return types.ErrAlreadyStarted
}
o.transactions = transactions
if len(o.conditions) > 0 {
o.logger.Warnf("Using deprecated field `outputs` which will be removed in the next major release of Benthos. For more information check out the docs at https://www.benthos.dev/docs/components/outputs/switch.")
go o.loopDeprecated()
} else {
go o.loop()
}
return nil
}
// MaxInFlight returns the maximum number of in flight messages permitted by the
// output. This value can be used to determine a sensible value for parent
// outputs, but should not be relied upon as part of dispatcher logic.
func (o *Switch) MaxInFlight() (int, bool) {
return o.maxInFlight, true
}
// Connected returns a boolean indicating whether this output is currently
// connected to its target.
func (o *Switch) Connected() bool {
for _, out := range o.outputs {
if !out.Connected() {
return false
}
}
return true
}
//------------------------------------------------------------------------------
func (o *Switch) dispatchRetryOnErr(outputTargets [][]types.Part) error {
var owg errgroup.Group
for target, parts := range outputTargets {
if len(parts) == 0 {
continue
}
msgCopy, i := message.New(nil), target
msgCopy.SetAll(parts)
owg.Go(func() error {
throt := throttle.New(throttle.OptCloseChan(o.ctx.Done()))
resChan := make(chan types.Response)
// Try until success or shutdown.
for {
select {
case o.outputTSChans[i] <- types.NewTransaction(msgCopy, resChan):
case <-o.ctx.Done():
return types.ErrTypeClosed
}
select {
case res := <-resChan:
if res.Error() != nil {
o.logger.Errorf("Failed to dispatch switch message: %v\n", res.Error())
o.mOutputErr.Incr(1)
if !throt.Retry() {
return types.ErrTypeClosed
}
} else {
o.mMsgSnt.Incr(1)
return nil
}
case <-o.ctx.Done():
return types.ErrTypeClosed
}
}
})
}
return owg.Wait()
}
func (o *Switch) dispatchNoRetries(group *imessage.SortGroup, sourceMessage types.Message, outputTargets [][]types.Part) error {
var wg sync.WaitGroup
var setErr func(error)
var setErrForPart func(types.Part, error)
var getErr func() error
{
var generalErr error
var batchErr *batch.Error
var errLock sync.Mutex
setErr = func(err error) {
if err == nil {
return
}
errLock.Lock()
generalErr = err
errLock.Unlock()
}
setErrForPart = func(part types.Part, err error) {
if err == nil {
return
}
errLock.Lock()
defer errLock.Unlock()
index := group.GetIndex(part)
if index == -1 {
generalErr = err
return
}
if batchErr == nil {
batchErr = batch.NewError(sourceMessage, err)
}
batchErr.Failed(index, err)
}
getErr = func() error {
if batchErr != nil {
return batchErr
}
return generalErr
}
}
for target, parts := range outputTargets {
if len(parts) == 0 {
continue
}
wg.Add(1)
msgCopy, i := message.New(nil), target
msgCopy.SetAll(parts)
go func() {
defer wg.Done()
resChan := make(chan types.Response)
select {
case o.outputTSChans[i] <- types.NewTransaction(msgCopy, resChan):
case <-o.ctx.Done():
setErr(types.ErrTypeClosed)
return
}
select {
case res := <-resChan:
if res.Error() != nil {
o.mOutputErr.Incr(1)
if bErr, ok := res.Error().(*batch.Error); ok {
bErr.WalkParts(func(i int, p types.Part, e error) bool {
if e != nil {
setErrForPart(p, e)
}
return true
})
} else {
msgCopy.Iter(func(i int, p types.Part) error {
setErrForPart(p, res.Error())
return nil
})
}
} else {
o.mMsgSnt.Incr(1)
}
case <-o.ctx.Done():
setErr(types.ErrTypeClosed)
}
}()
}
wg.Wait()
return getErr()
}
// loop is an internal loop that brokers incoming messages to many outputs.
func (o *Switch) loop() {
var wg sync.WaitGroup
defer func() {
wg.Wait()
for i, output := range o.outputs {
output.CloseAsync()
close(o.outputTSChans[i])
}
for _, output := range o.outputs {
_ = output.WaitForClose(shutdown.MaximumShutdownWait())
}
close(o.closedChan)
}()
sendLoop := func() {
defer wg.Done()
for {
var ts types.Transaction
var open bool
select {
case ts, open = <-o.transactions:
if !open {
return
}
case <-o.ctx.Done():
return
}
o.mMsgRcvd.Incr(1)
group, trackedMsg := imessage.NewSortGroup(ts.Payload)
outputTargets := make([][]types.Part, len(o.checks))
if checksErr := trackedMsg.Iter(func(i int, p types.Part) error {
routedAtLeastOnce := false
for j, exe := range o.checks {
test := true
if exe != nil {
var err error
if test, err = exe.QueryPart(i, trackedMsg); err != nil {
test = false
o.logger.Errorf("Failed to test case %v: %v\n", j, err)
}
}
if test {
routedAtLeastOnce = true
outputTargets[j] = append(outputTargets[j], p.Copy())
if !o.continues[j] {
return nil
}
}
}
if !routedAtLeastOnce && o.strictMode {
return ErrSwitchNoConditionMet
}
return nil
}); checksErr != nil {
select {
case ts.ResponseChan <- response.NewError(checksErr):
case <-o.ctx.Done():
return
}
continue
}
var resErr error
if o.retryUntilSuccess {
resErr = o.dispatchRetryOnErr(outputTargets)
} else {
resErr = o.dispatchNoRetries(group, trackedMsg, outputTargets)
}
var oResponse types.Response = response.NewAck()
if resErr != nil {
oResponse = response.NewError(resErr)
}
select {
case ts.ResponseChan <- oResponse:
case <-o.ctx.Done():
return
}
}
}
// Max in flight
for i := 0; i < o.maxInFlight; i++ {
wg.Add(1)
go sendLoop()
}
}
// CloseAsync shuts down the Switch broker and stops processing requests.
func (o *Switch) CloseAsync() {
o.close()
}
// WaitForClose blocks until the Switch broker has closed down.
func (o *Switch) WaitForClose(timeout time.Duration) error {
select {
case <-o.closedChan:
case <-time.After(timeout):
return types.ErrTimeout
}
return nil
}
//------------------------------------------------------------------------------ | }
if len(cConf.Check) > 0 {
if o.checks[i], err = interop.NewBloblangMapping(mgr, cConf.Check); err != nil {
return nil, fmt.Errorf("failed to parse case '%v' check mapping: %v", i, err)
} | random_line_split |
HAPServiceNode2.ts | import { logger } from '@nrchkb/logger'
import { uuid } from 'hap-nodejs'
import { NodeAPI } from 'node-red'
import NRCHKBError from './NRCHKBError'
import { FlowTypeType } from './types/FlowType'
import HAPHostNodeType from './types/HAPHostNodeType'
import HAPService2ConfigType from './types/HAPService2ConfigType'
import HAPService2NodeType from './types/HAPService2NodeType'
import HostType from './types/HostType'
import { NodeStatusUtils } from './utils/NodeStatusUtils'
module.exports = (RED: NodeAPI) => {
/**
* Config override when user created services in old NRCHKB version
*/
const nrchkbConfigCompatibilityOverride = function (
this: HAPService2NodeType
) {
const self = this
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
if (self.config.isParent === undefined) {
log.trace(
`nrchkbConfigCompatibilityOverride => self.config.isParent=${self.config.isParent} value changed to true`
)
// Services created in pre linked services era where working in 1.2 but due to more typescript in 1.3+ it started to cause some errors
self.config.isParent = true
}
if (self.config.hostType === undefined) {
// When moving from 1.2 to 1.3 hostType is not defined on homekit-service
log.trace(
`nrchkbConfigCompatibilityOverride => self.config.hostType=${self.config.hostType} value changed to HostType.BRIDGE`
)
self.config.hostType = HostType.BRIDGE
}
}
const preInit = function (
this: HAPService2NodeType,
config: HAPService2ConfigType
) {
const self = this
self.nodeStatusUtils = new NodeStatusUtils(self)
self.config = config
self.name = self.config.name
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
self.RED = RED
self.publishTimers = {}
nrchkbConfigCompatibilityOverride.call(self)
RED.nodes.createNode(self, self.config)
const ServiceUtils = require('./utils/ServiceUtils2')(self)
new Promise<HAPService2ConfigType>((resolve) => {
if (self.config.waitForSetupMsg) {
log.debug(
'Waiting for Setup message. It should be of format {"payload":{"nrchkb":{"setup":{}}}}'
)
self.setupDone = false
self.nodeStatusUtils.setStatus({
fill: 'blue',
shape: 'dot',
text: 'Waiting for Setup',
})
self.handleWaitForSetup = (msg: Record<string, unknown>) =>
ServiceUtils.handleWaitForSetup(self.config, msg, resolve)
self.on('input', self.handleWaitForSetup)
} else {
resolve(self.config)
}
})
.then((newConfig) => {
init.call(self, newConfig)
})
.catch((error: any) => {
log.error(`Error while starting Service due to ${error}`)
})
}
const init = function (
this: HAPService2NodeType,
config: HAPService2ConfigType
) {
const self = this
self.config = config
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
const ServiceUtils = require('./utils/ServiceUtils2')(self)
if (self.config.isParent) {
log.debug('Starting Parent Service')
configure.call(self)
self.configured = true
self.reachable = true
} else {
const serviceType =
config.serviceName === 'CameraControl' ? 'Camera' : 'Linked'
ServiceUtils.waitForParent()
.then(() => {
log.debug(`Starting ${serviceType} Service`)
configure.call(self)
self.configured = true
})
.catch((error: any) => {
log.error(
`Error while starting ${serviceType} Service due to ${error}`
)
})
}
}
const configure = function (this: HAPService2NodeType) {
const self = this
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
const Utils = require('./utils')(self)
const AccessoryUtils = Utils.AccessoryUtils
const BridgeUtils = Utils.BridgeUtils
const CharacteristicUtils = require('./utils/CharacteristicUtils2')(
self
)
const ServiceUtils = require('./utils/ServiceUtils2')(self)
let parentNode: HAPService2NodeType
if (self.config.isParent) {
const hostId =
self.config.hostType == HostType.BRIDGE
? self.config.bridge
: self.config.accessoryId
self.hostNode = RED.nodes.getNode(hostId) as HAPHostNodeType
if (!self.hostNode) {
log.error('Host Node not found', false)
throw new NRCHKBError('Host Node not found')
}
self.childNodes = []
self.childNodes.push(self)
} else {
// Retrieve parent service node
parentNode = RED.nodes.getNode(
self.config.parentService
) as HAPService2NodeType
if (!parentNode) {
log.error('Parent Node not assigned', false)
throw new NRCHKBError('Parent Node not assigned')
}
self.parentNode = parentNode
self.parentService = self.parentNode.service
if (!self.parentService) {
log.error('Parent Service not assigned', false)
throw new NRCHKBError('Parent Service not assigned')
}
self.hostNode = self.parentNode.hostNode
self.parentNode.childNodes?.push(self) |
// Service node properties
self.name = self.config.name
// Find a unique identifier for the current service
if (
self.hasOwnProperty('_flow') &&
self.hasOwnProperty('_alias') &&
self._flow.hasOwnProperty('TYPE') &&
FlowTypeType.Subflow == self._flow.TYPE
) {
// For subflows, use the service node identifier from the subflow template
// plus the full path from the subflow node identifier to the subflow.
self.uniqueIdentifier = self._alias + '/' + self._flow.path
} else {
// For top level flows, use the node identifier
self.uniqueIdentifier = self.id
}
// Generate UUID from unique identifier
const subtypeUUID = uuid.generate(self.uniqueIdentifier)
// Look for existing Accessory or create a new one
if (self.config.hostType == HostType.BRIDGE) {
if (self.config.isParent) {
// According to the HomeKit Accessory Protocol Specification the value
// of the fields Name, Manufacturer, Serial Number and Model must not
// change throughout the lifetime of an accessory. Because of that the
// accessory UUID will be generated based on that data to ensure that
// a new accessory will be created if any of those configuration values
// changes.
const accessoryUUID = uuid.generate(
'A' +
self.uniqueIdentifier +
self.name +
self.config.manufacturer +
self.config.serialNo +
self.config.model
)
self.accessory = AccessoryUtils.getOrCreate(
self.hostNode.host,
{
name: self.name,
UUID: accessoryUUID,
manufacturer: self.config.manufacturer,
serialNo: self.config.serialNo,
model: self.config.model,
firmwareRev: self.config.firmwareRev,
hardwareRev: self.config.hardwareRev,
softwareRev: self.config.softwareRev,
},
subtypeUUID // subtype of the primary service for identification
)
//Respond to identify
self.onIdentify = AccessoryUtils.onIdentify
self.accessory.on('identify', self.onIdentify)
}
} else {
// We are using Standalone Accessory mode so no need to create new Accessory as we have "host" already
log.debug('Binding Service accessory as Standalone Accessory')
self.accessory = self.hostNode.host
}
// Look for existing Service or create a new one
self.service = ServiceUtils.getOrCreate(
self.accessory,
{
name: self.name,
UUID: subtypeUUID,
serviceName: self.config.serviceName,
config: self.config,
},
self.parentService
)
self.characteristicProperties = CharacteristicUtils.load(
self.service,
self.config
)
if (self.config.isParent) {
BridgeUtils.delayedPublish(self)
}
// The pinCode should be shown to the user until interaction with iOS
// client starts
self.nodeStatusUtils.setStatus({
fill: 'yellow',
shape: 'ring',
text: self.hostNode.config.pinCode,
})
// Emit message when value changes
// service.on("characteristic-change", ServiceUtils.onCharacteristicChange);
// Subscribe to set and get on characteristics for that service and get
// list of all supported
self.supported = CharacteristicUtils.subscribeAndGetSupported(
self.service
)
// Respond to inputs
self.on('input', ServiceUtils.onInput)
self.on('close', ServiceUtils.onClose)
}
return {
preInit,
init,
}
} |
self.accessory = self.parentNode.accessory
} | random_line_split |
HAPServiceNode2.ts | import { logger } from '@nrchkb/logger'
import { uuid } from 'hap-nodejs'
import { NodeAPI } from 'node-red'
import NRCHKBError from './NRCHKBError'
import { FlowTypeType } from './types/FlowType'
import HAPHostNodeType from './types/HAPHostNodeType'
import HAPService2ConfigType from './types/HAPService2ConfigType'
import HAPService2NodeType from './types/HAPService2NodeType'
import HostType from './types/HostType'
import { NodeStatusUtils } from './utils/NodeStatusUtils'
module.exports = (RED: NodeAPI) => {
/**
* Config override when user created services in old NRCHKB version
*/
const nrchkbConfigCompatibilityOverride = function (
this: HAPService2NodeType
) {
const self = this
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
if (self.config.isParent === undefined) {
log.trace(
`nrchkbConfigCompatibilityOverride => self.config.isParent=${self.config.isParent} value changed to true`
)
// Services created in pre linked services era where working in 1.2 but due to more typescript in 1.3+ it started to cause some errors
self.config.isParent = true
}
if (self.config.hostType === undefined) {
// When moving from 1.2 to 1.3 hostType is not defined on homekit-service
log.trace(
`nrchkbConfigCompatibilityOverride => self.config.hostType=${self.config.hostType} value changed to HostType.BRIDGE`
)
self.config.hostType = HostType.BRIDGE
}
}
const preInit = function (
this: HAPService2NodeType,
config: HAPService2ConfigType
) {
const self = this
self.nodeStatusUtils = new NodeStatusUtils(self)
self.config = config
self.name = self.config.name
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
self.RED = RED
self.publishTimers = {}
nrchkbConfigCompatibilityOverride.call(self)
RED.nodes.createNode(self, self.config)
const ServiceUtils = require('./utils/ServiceUtils2')(self)
new Promise<HAPService2ConfigType>((resolve) => {
if (self.config.waitForSetupMsg) | else {
resolve(self.config)
}
})
.then((newConfig) => {
init.call(self, newConfig)
})
.catch((error: any) => {
log.error(`Error while starting Service due to ${error}`)
})
}
const init = function (
this: HAPService2NodeType,
config: HAPService2ConfigType
) {
const self = this
self.config = config
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
const ServiceUtils = require('./utils/ServiceUtils2')(self)
if (self.config.isParent) {
log.debug('Starting Parent Service')
configure.call(self)
self.configured = true
self.reachable = true
} else {
const serviceType =
config.serviceName === 'CameraControl' ? 'Camera' : 'Linked'
ServiceUtils.waitForParent()
.then(() => {
log.debug(`Starting ${serviceType} Service`)
configure.call(self)
self.configured = true
})
.catch((error: any) => {
log.error(
`Error while starting ${serviceType} Service due to ${error}`
)
})
}
}
const configure = function (this: HAPService2NodeType) {
const self = this
const log = logger('NRCHKB', 'HAPServiceNode2', self.config.name, self)
const Utils = require('./utils')(self)
const AccessoryUtils = Utils.AccessoryUtils
const BridgeUtils = Utils.BridgeUtils
const CharacteristicUtils = require('./utils/CharacteristicUtils2')(
self
)
const ServiceUtils = require('./utils/ServiceUtils2')(self)
let parentNode: HAPService2NodeType
if (self.config.isParent) {
const hostId =
self.config.hostType == HostType.BRIDGE
? self.config.bridge
: self.config.accessoryId
self.hostNode = RED.nodes.getNode(hostId) as HAPHostNodeType
if (!self.hostNode) {
log.error('Host Node not found', false)
throw new NRCHKBError('Host Node not found')
}
self.childNodes = []
self.childNodes.push(self)
} else {
// Retrieve parent service node
parentNode = RED.nodes.getNode(
self.config.parentService
) as HAPService2NodeType
if (!parentNode) {
log.error('Parent Node not assigned', false)
throw new NRCHKBError('Parent Node not assigned')
}
self.parentNode = parentNode
self.parentService = self.parentNode.service
if (!self.parentService) {
log.error('Parent Service not assigned', false)
throw new NRCHKBError('Parent Service not assigned')
}
self.hostNode = self.parentNode.hostNode
self.parentNode.childNodes?.push(self)
self.accessory = self.parentNode.accessory
}
// Service node properties
self.name = self.config.name
// Find a unique identifier for the current service
if (
self.hasOwnProperty('_flow') &&
self.hasOwnProperty('_alias') &&
self._flow.hasOwnProperty('TYPE') &&
FlowTypeType.Subflow == self._flow.TYPE
) {
// For subflows, use the service node identifier from the subflow template
// plus the full path from the subflow node identifier to the subflow.
self.uniqueIdentifier = self._alias + '/' + self._flow.path
} else {
// For top level flows, use the node identifier
self.uniqueIdentifier = self.id
}
// Generate UUID from unique identifier
const subtypeUUID = uuid.generate(self.uniqueIdentifier)
// Look for existing Accessory or create a new one
if (self.config.hostType == HostType.BRIDGE) {
if (self.config.isParent) {
// According to the HomeKit Accessory Protocol Specification the value
// of the fields Name, Manufacturer, Serial Number and Model must not
// change throughout the lifetime of an accessory. Because of that the
// accessory UUID will be generated based on that data to ensure that
// a new accessory will be created if any of those configuration values
// changes.
const accessoryUUID = uuid.generate(
'A' +
self.uniqueIdentifier +
self.name +
self.config.manufacturer +
self.config.serialNo +
self.config.model
)
self.accessory = AccessoryUtils.getOrCreate(
self.hostNode.host,
{
name: self.name,
UUID: accessoryUUID,
manufacturer: self.config.manufacturer,
serialNo: self.config.serialNo,
model: self.config.model,
firmwareRev: self.config.firmwareRev,
hardwareRev: self.config.hardwareRev,
softwareRev: self.config.softwareRev,
},
subtypeUUID // subtype of the primary service for identification
)
//Respond to identify
self.onIdentify = AccessoryUtils.onIdentify
self.accessory.on('identify', self.onIdentify)
}
} else {
// We are using Standalone Accessory mode so no need to create new Accessory as we have "host" already
log.debug('Binding Service accessory as Standalone Accessory')
self.accessory = self.hostNode.host
}
// Look for existing Service or create a new one
self.service = ServiceUtils.getOrCreate(
self.accessory,
{
name: self.name,
UUID: subtypeUUID,
serviceName: self.config.serviceName,
config: self.config,
},
self.parentService
)
self.characteristicProperties = CharacteristicUtils.load(
self.service,
self.config
)
if (self.config.isParent) {
BridgeUtils.delayedPublish(self)
}
// The pinCode should be shown to the user until interaction with iOS
// client starts
self.nodeStatusUtils.setStatus({
fill: 'yellow',
shape: 'ring',
text: self.hostNode.config.pinCode,
})
// Emit message when value changes
// service.on("characteristic-change", ServiceUtils.onCharacteristicChange);
// Subscribe to set and get on characteristics for that service and get
// list of all supported
self.supported = CharacteristicUtils.subscribeAndGetSupported(
self.service
)
// Respond to inputs
self.on('input', ServiceUtils.onInput)
self.on('close', ServiceUtils.onClose)
}
return {
preInit,
init,
}
}
| {
log.debug(
'Waiting for Setup message. It should be of format {"payload":{"nrchkb":{"setup":{}}}}'
)
self.setupDone = false
self.nodeStatusUtils.setStatus({
fill: 'blue',
shape: 'dot',
text: 'Waiting for Setup',
})
self.handleWaitForSetup = (msg: Record<string, unknown>) =>
ServiceUtils.handleWaitForSetup(self.config, msg, resolve)
self.on('input', self.handleWaitForSetup)
} | conditional_block |
fvp.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: fvp.proto
package fvp
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type SendMsg struct {
KnownStates []*SendMsg_State `protobuf:"bytes,1,rep,name=knownStates,proto3" json:"knownStates,omitempty"`
Term int32 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg) Reset() { *m = SendMsg{} }
func (m *SendMsg) String() string { return proto.CompactTextString(m) }
func (*SendMsg) ProtoMessage() {}
func (*SendMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0}
}
func (m *SendMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg.Unmarshal(m, b)
}
func (m *SendMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg.Marshal(b, m, deterministic)
}
func (m *SendMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg.Merge(m, src)
}
func (m *SendMsg) XXX_Size() int {
return xxx_messageInfo_SendMsg.Size(m)
}
func (m *SendMsg) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg proto.InternalMessageInfo
func (m *SendMsg) GetKnownStates() []*SendMsg_State {
if m != nil {
return m.KnownStates
}
return nil
}
func (m *SendMsg) GetTerm() int32 {
if m != nil {
return m.Term
}
return 0
}
type SendMsg_Slice struct {
Nodes []string `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_Slice) Reset() { *m = SendMsg_Slice{} }
func (m *SendMsg_Slice) String() string { return proto.CompactTextString(m) }
func (*SendMsg_Slice) ProtoMessage() {}
func (*SendMsg_Slice) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 0}
}
func (m *SendMsg_Slice) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_Slice.Unmarshal(m, b)
}
func (m *SendMsg_Slice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_Slice.Marshal(b, m, deterministic)
}
func (m *SendMsg_Slice) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_Slice.Merge(m, src)
}
func (m *SendMsg_Slice) XXX_Size() int {
return xxx_messageInfo_SendMsg_Slice.Size(m)
}
func (m *SendMsg_Slice) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_Slice.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_Slice proto.InternalMessageInfo
func (m *SendMsg_Slice) GetNodes() []string {
if m != nil {
return m.Nodes
}
return nil
}
type SendMsg_State struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
VotedFor []string `protobuf:"bytes,2,rep,name=votedFor,proto3" json:"votedFor,omitempty"`
Accepted []string `protobuf:"bytes,3,rep,name=accepted,proto3" json:"accepted,omitempty"`
Confirmed []string `protobuf:"bytes,4,rep,name=confirmed,proto3" json:"confirmed,omitempty"`
QuorumSlices []*SendMsg_Slice `protobuf:"bytes,5,rep,name=quorumSlices,proto3" json:"quorumSlices,omitempty"`
Counter int32 `protobuf:"varint,6,opt,name=counter,proto3" json:"counter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_State) Reset() { *m = SendMsg_State{} }
func (m *SendMsg_State) String() string { return proto.CompactTextString(m) }
func (*SendMsg_State) ProtoMessage() {}
func (*SendMsg_State) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 1}
}
func (m *SendMsg_State) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_State.Unmarshal(m, b)
}
func (m *SendMsg_State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_State.Marshal(b, m, deterministic)
}
func (m *SendMsg_State) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_State.Merge(m, src)
}
func (m *SendMsg_State) XXX_Size() int {
return xxx_messageInfo_SendMsg_State.Size(m)
}
func (m *SendMsg_State) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_State.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_State proto.InternalMessageInfo
func (m *SendMsg_State) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *SendMsg_State) GetVotedFor() []string {
if m != nil {
return m.VotedFor
}
return nil
}
func (m *SendMsg_State) GetAccepted() []string {
if m != nil {
return m.Accepted
}
return nil
}
func (m *SendMsg_State) GetConfirmed() []string {
if m != nil {
return m.Confirmed
}
return nil
}
func (m *SendMsg_State) GetQuorumSlices() []*SendMsg_Slice {
if m != nil {
return m.QuorumSlices
}
return nil
}
func (m *SendMsg_State) | () int32 {
if m != nil {
return m.Counter
}
return 0
}
type EmptyMessage struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EmptyMessage) Reset() { *m = EmptyMessage{} }
func (m *EmptyMessage) String() string { return proto.CompactTextString(m) }
func (*EmptyMessage) ProtoMessage() {}
func (*EmptyMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{1}
}
func (m *EmptyMessage) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EmptyMessage.Unmarshal(m, b)
}
func (m *EmptyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EmptyMessage.Marshal(b, m, deterministic)
}
func (m *EmptyMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_EmptyMessage.Merge(m, src)
}
func (m *EmptyMessage) XXX_Size() int {
return xxx_messageInfo_EmptyMessage.Size(m)
}
func (m *EmptyMessage) XXX_DiscardUnknown() {
xxx_messageInfo_EmptyMessage.DiscardUnknown(m)
}
var xxx_messageInfo_EmptyMessage proto.InternalMessageInfo
func init() {
proto.RegisterType((*SendMsg)(nil), "fvp.SendMsg")
proto.RegisterType((*SendMsg_Slice)(nil), "fvp.SendMsg.Slice")
proto.RegisterType((*SendMsg_State)(nil), "fvp.SendMsg.State")
proto.RegisterType((*EmptyMessage)(nil), "fvp.EmptyMessage")
}
func init() { proto.RegisterFile("fvp.proto", fileDescriptor_9e36e933c92912d0) }
var fileDescriptor_9e36e933c92912d0 = []byte{
// 272 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0x84, 0x30,
0x10, 0xc6, 0xe5, 0xef, 0xca, 0x2c, 0xd9, 0xc4, 0x89, 0x87, 0x86, 0x68, 0x42, 0x38, 0xe1, 0x85,
0xc3, 0xae, 0xf1, 0x0d, 0xf4, 0xb6, 0x17, 0x78, 0x02, 0xa4, 0xc3, 0x86, 0x28, 0x2d, 0xb6, 0x05,
0xe3, 0x7b, 0x79, 0xf7, 0xd5, 0x0c, 0xdd, 0x5d, 0xc5, 0xc4, 0x5b, 0x7f, 0xfd, 0xcd, 0xf7, 0xa5,
0x93, 0x42, 0xd4, 0x4e, 0x43, 0x31, 0x28, 0x69, 0x24, 0x7a, 0xed, 0x34, 0x64, 0x9f, 0x2e, 0xac,
0x2a, 0x12, 0x7c, 0xaf, 0x0f, 0x78, 0x0f, 0xeb, 0x17, 0x21, 0xdf, 0x45, 0x65, 0x6a, 0x43, 0x9a,
0x39, 0xa9, 0x97, 0xaf, 0xb7, 0x58, 0xcc, 0x89, 0xd3, 0x48, 0x61, 0x55, 0xb9, 0x1c, 0x43, 0x04,
0xdf, 0x90, 0xea, 0x99, 0x9b, 0x3a, 0x79, 0x50, 0xda, 0x73, 0x72, 0x0b, 0x41, 0xf5, 0xda, 0x35,
0x84, 0xd7, 0x10, 0x08, 0xc9, 0x4f, 0x65, 0x51, 0x79, 0x84, 0xe4, 0xcb, 0x81, 0xc0, 0xa6, 0x71,
0x03, 0x6e, 0xc7, 0x99, 0x93, 0x3a, 0x79, 0x54, 0xba, 0x1d, 0xc7, 0x04, 0x2e, 0x27, 0x69, 0x88,
0x3f, 0x49, 0xc5, 0x5c, 0x1b, 0xf9, 0xe1, 0xd9, 0xd5, 0x4d, 0x43, 0x83, 0x21, 0xce, 0xbc, 0xa3,
0x3b, 0x33, 0xde, 0x40, 0xd4, 0x48, 0xd1, 0x76, 0xaa, 0x27, 0xce, 0x7c, 0x2b, 0x7f, 0x2f, 0xf0,
0x01, 0xe2, 0xb7, 0x51, 0xaa, 0xb1, 0xb7, 0x8f, 0xd2, 0x2c, 0xf8, 0x6f, 0xb3, 0x59, 0x95, 0x7f,
0xe6, 0x90, 0xc1, 0xaa, 0x91, 0xa3, 0x30, 0xa4, 0x58, 0x68, 0xb7, 0x3b, 0x63, 0xb6, 0x81, 0xf8,
0xb1, 0x1f, 0xcc, 0xc7, 0x9e, 0xb4, 0xae, 0x0f, 0xb4, 0xdd, 0x41, 0x58, 0x91, 0x9a, 0x48, 0xe1,
0x1d, 0xf8, 0x73, 0x25, 0xc6, 0xcb, 0xf6, 0xe4, 0xca, 0xd2, 0x32, 0x92, 0x5d, 0x3c, 0x87, 0xf6,
0x1f, 0x76, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x29, 0x50, 0xd3, 0x94, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ServerClient is the client API for Server service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ServerClient interface {
Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error)
}
type serverClient struct {
cc *grpc.ClientConn
}
func NewServerClient(cc *grpc.ClientConn) ServerClient {
return &serverClient{cc}
}
func (c *serverClient) Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error) {
out := new(EmptyMessage)
err := c.cc.Invoke(ctx, "/fvp.Server/Send", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ServerServer is the server API for Server service.
type ServerServer interface {
Send(context.Context, *SendMsg) (*EmptyMessage, error)
}
// UnimplementedServerServer can be embedded to have forward compatible implementations.
type UnimplementedServerServer struct {
}
func (*UnimplementedServerServer) Send(ctx context.Context, req *SendMsg) (*EmptyMessage, error) {
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
}
func RegisterServerServer(s *grpc.Server, srv ServerServer) {
s.RegisterService(&_Server_serviceDesc, srv)
}
func _Server_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendMsg)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ServerServer).Send(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/fvp.Server/Send",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ServerServer).Send(ctx, req.(*SendMsg))
}
return interceptor(ctx, in, info, handler)
}
var _Server_serviceDesc = grpc.ServiceDesc{
ServiceName: "fvp.Server",
HandlerType: (*ServerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Send",
Handler: _Server_Send_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "fvp.proto",
}
| GetCounter | identifier_name |
fvp.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: fvp.proto
package fvp
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type SendMsg struct {
KnownStates []*SendMsg_State `protobuf:"bytes,1,rep,name=knownStates,proto3" json:"knownStates,omitempty"`
Term int32 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg) Reset() |
func (m *SendMsg) String() string { return proto.CompactTextString(m) }
func (*SendMsg) ProtoMessage() {}
func (*SendMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0}
}
func (m *SendMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg.Unmarshal(m, b)
}
func (m *SendMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg.Marshal(b, m, deterministic)
}
func (m *SendMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg.Merge(m, src)
}
func (m *SendMsg) XXX_Size() int {
return xxx_messageInfo_SendMsg.Size(m)
}
func (m *SendMsg) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg proto.InternalMessageInfo
func (m *SendMsg) GetKnownStates() []*SendMsg_State {
if m != nil {
return m.KnownStates
}
return nil
}
func (m *SendMsg) GetTerm() int32 {
if m != nil {
return m.Term
}
return 0
}
type SendMsg_Slice struct {
Nodes []string `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_Slice) Reset() { *m = SendMsg_Slice{} }
func (m *SendMsg_Slice) String() string { return proto.CompactTextString(m) }
func (*SendMsg_Slice) ProtoMessage() {}
func (*SendMsg_Slice) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 0}
}
func (m *SendMsg_Slice) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_Slice.Unmarshal(m, b)
}
func (m *SendMsg_Slice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_Slice.Marshal(b, m, deterministic)
}
func (m *SendMsg_Slice) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_Slice.Merge(m, src)
}
func (m *SendMsg_Slice) XXX_Size() int {
return xxx_messageInfo_SendMsg_Slice.Size(m)
}
func (m *SendMsg_Slice) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_Slice.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_Slice proto.InternalMessageInfo
func (m *SendMsg_Slice) GetNodes() []string {
if m != nil {
return m.Nodes
}
return nil
}
type SendMsg_State struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
VotedFor []string `protobuf:"bytes,2,rep,name=votedFor,proto3" json:"votedFor,omitempty"`
Accepted []string `protobuf:"bytes,3,rep,name=accepted,proto3" json:"accepted,omitempty"`
Confirmed []string `protobuf:"bytes,4,rep,name=confirmed,proto3" json:"confirmed,omitempty"`
QuorumSlices []*SendMsg_Slice `protobuf:"bytes,5,rep,name=quorumSlices,proto3" json:"quorumSlices,omitempty"`
Counter int32 `protobuf:"varint,6,opt,name=counter,proto3" json:"counter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_State) Reset() { *m = SendMsg_State{} }
func (m *SendMsg_State) String() string { return proto.CompactTextString(m) }
func (*SendMsg_State) ProtoMessage() {}
func (*SendMsg_State) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 1}
}
func (m *SendMsg_State) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_State.Unmarshal(m, b)
}
func (m *SendMsg_State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_State.Marshal(b, m, deterministic)
}
func (m *SendMsg_State) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_State.Merge(m, src)
}
func (m *SendMsg_State) XXX_Size() int {
return xxx_messageInfo_SendMsg_State.Size(m)
}
func (m *SendMsg_State) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_State.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_State proto.InternalMessageInfo
func (m *SendMsg_State) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *SendMsg_State) GetVotedFor() []string {
if m != nil {
return m.VotedFor
}
return nil
}
func (m *SendMsg_State) GetAccepted() []string {
if m != nil {
return m.Accepted
}
return nil
}
func (m *SendMsg_State) GetConfirmed() []string {
if m != nil {
return m.Confirmed
}
return nil
}
func (m *SendMsg_State) GetQuorumSlices() []*SendMsg_Slice {
if m != nil {
return m.QuorumSlices
}
return nil
}
func (m *SendMsg_State) GetCounter() int32 {
if m != nil {
return m.Counter
}
return 0
}
type EmptyMessage struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EmptyMessage) Reset() { *m = EmptyMessage{} }
func (m *EmptyMessage) String() string { return proto.CompactTextString(m) }
func (*EmptyMessage) ProtoMessage() {}
func (*EmptyMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{1}
}
func (m *EmptyMessage) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EmptyMessage.Unmarshal(m, b)
}
func (m *EmptyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EmptyMessage.Marshal(b, m, deterministic)
}
func (m *EmptyMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_EmptyMessage.Merge(m, src)
}
func (m *EmptyMessage) XXX_Size() int {
return xxx_messageInfo_EmptyMessage.Size(m)
}
func (m *EmptyMessage) XXX_DiscardUnknown() {
xxx_messageInfo_EmptyMessage.DiscardUnknown(m)
}
var xxx_messageInfo_EmptyMessage proto.InternalMessageInfo
func init() {
proto.RegisterType((*SendMsg)(nil), "fvp.SendMsg")
proto.RegisterType((*SendMsg_Slice)(nil), "fvp.SendMsg.Slice")
proto.RegisterType((*SendMsg_State)(nil), "fvp.SendMsg.State")
proto.RegisterType((*EmptyMessage)(nil), "fvp.EmptyMessage")
}
func init() { proto.RegisterFile("fvp.proto", fileDescriptor_9e36e933c92912d0) }
var fileDescriptor_9e36e933c92912d0 = []byte{
// 272 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0x84, 0x30,
0x10, 0xc6, 0xe5, 0xef, 0xca, 0x2c, 0xd9, 0xc4, 0x89, 0x87, 0x86, 0x68, 0x42, 0x38, 0xe1, 0x85,
0xc3, 0xae, 0xf1, 0x0d, 0xf4, 0xb6, 0x17, 0x78, 0x02, 0xa4, 0xc3, 0x86, 0x28, 0x2d, 0xb6, 0x05,
0xe3, 0x7b, 0x79, 0xf7, 0xd5, 0x0c, 0xdd, 0x5d, 0xc5, 0xc4, 0x5b, 0x7f, 0xfd, 0xcd, 0xf7, 0xa5,
0x93, 0x42, 0xd4, 0x4e, 0x43, 0x31, 0x28, 0x69, 0x24, 0x7a, 0xed, 0x34, 0x64, 0x9f, 0x2e, 0xac,
0x2a, 0x12, 0x7c, 0xaf, 0x0f, 0x78, 0x0f, 0xeb, 0x17, 0x21, 0xdf, 0x45, 0x65, 0x6a, 0x43, 0x9a,
0x39, 0xa9, 0x97, 0xaf, 0xb7, 0x58, 0xcc, 0x89, 0xd3, 0x48, 0x61, 0x55, 0xb9, 0x1c, 0x43, 0x04,
0xdf, 0x90, 0xea, 0x99, 0x9b, 0x3a, 0x79, 0x50, 0xda, 0x73, 0x72, 0x0b, 0x41, 0xf5, 0xda, 0x35,
0x84, 0xd7, 0x10, 0x08, 0xc9, 0x4f, 0x65, 0x51, 0x79, 0x84, 0xe4, 0xcb, 0x81, 0xc0, 0xa6, 0x71,
0x03, 0x6e, 0xc7, 0x99, 0x93, 0x3a, 0x79, 0x54, 0xba, 0x1d, 0xc7, 0x04, 0x2e, 0x27, 0x69, 0x88,
0x3f, 0x49, 0xc5, 0x5c, 0x1b, 0xf9, 0xe1, 0xd9, 0xd5, 0x4d, 0x43, 0x83, 0x21, 0xce, 0xbc, 0xa3,
0x3b, 0x33, 0xde, 0x40, 0xd4, 0x48, 0xd1, 0x76, 0xaa, 0x27, 0xce, 0x7c, 0x2b, 0x7f, 0x2f, 0xf0,
0x01, 0xe2, 0xb7, 0x51, 0xaa, 0xb1, 0xb7, 0x8f, 0xd2, 0x2c, 0xf8, 0x6f, 0xb3, 0x59, 0x95, 0x7f,
0xe6, 0x90, 0xc1, 0xaa, 0x91, 0xa3, 0x30, 0xa4, 0x58, 0x68, 0xb7, 0x3b, 0x63, 0xb6, 0x81, 0xf8,
0xb1, 0x1f, 0xcc, 0xc7, 0x9e, 0xb4, 0xae, 0x0f, 0xb4, 0xdd, 0x41, 0x58, 0x91, 0x9a, 0x48, 0xe1,
0x1d, 0xf8, 0x73, 0x25, 0xc6, 0xcb, 0xf6, 0xe4, 0xca, 0xd2, 0x32, 0x92, 0x5d, 0x3c, 0x87, 0xf6,
0x1f, 0x76, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x29, 0x50, 0xd3, 0x94, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ServerClient is the client API for Server service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ServerClient interface {
Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error)
}
type serverClient struct {
cc *grpc.ClientConn
}
func NewServerClient(cc *grpc.ClientConn) ServerClient {
return &serverClient{cc}
}
func (c *serverClient) Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error) {
out := new(EmptyMessage)
err := c.cc.Invoke(ctx, "/fvp.Server/Send", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ServerServer is the server API for Server service.
type ServerServer interface {
Send(context.Context, *SendMsg) (*EmptyMessage, error)
}
// UnimplementedServerServer can be embedded to have forward compatible implementations.
type UnimplementedServerServer struct {
}
func (*UnimplementedServerServer) Send(ctx context.Context, req *SendMsg) (*EmptyMessage, error) {
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
}
func RegisterServerServer(s *grpc.Server, srv ServerServer) {
s.RegisterService(&_Server_serviceDesc, srv)
}
func _Server_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendMsg)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ServerServer).Send(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/fvp.Server/Send",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ServerServer).Send(ctx, req.(*SendMsg))
}
return interceptor(ctx, in, info, handler)
}
var _Server_serviceDesc = grpc.ServiceDesc{
ServiceName: "fvp.Server",
HandlerType: (*ServerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Send",
Handler: _Server_Send_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "fvp.proto",
}
| { *m = SendMsg{} } | identifier_body |
fvp.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: fvp.proto
package fvp
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type SendMsg struct {
KnownStates []*SendMsg_State `protobuf:"bytes,1,rep,name=knownStates,proto3" json:"knownStates,omitempty"`
Term int32 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg) Reset() { *m = SendMsg{} }
func (m *SendMsg) String() string { return proto.CompactTextString(m) }
func (*SendMsg) ProtoMessage() {}
func (*SendMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0}
}
func (m *SendMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg.Unmarshal(m, b)
}
func (m *SendMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg.Marshal(b, m, deterministic)
}
func (m *SendMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg.Merge(m, src)
}
func (m *SendMsg) XXX_Size() int {
return xxx_messageInfo_SendMsg.Size(m)
}
func (m *SendMsg) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg proto.InternalMessageInfo
func (m *SendMsg) GetKnownStates() []*SendMsg_State {
if m != nil {
return m.KnownStates
}
return nil
}
func (m *SendMsg) GetTerm() int32 {
if m != nil {
return m.Term
}
return 0
}
type SendMsg_Slice struct {
Nodes []string `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_Slice) Reset() { *m = SendMsg_Slice{} }
func (m *SendMsg_Slice) String() string { return proto.CompactTextString(m) }
func (*SendMsg_Slice) ProtoMessage() {}
func (*SendMsg_Slice) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 0}
}
func (m *SendMsg_Slice) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_Slice.Unmarshal(m, b)
}
func (m *SendMsg_Slice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_Slice.Marshal(b, m, deterministic)
}
func (m *SendMsg_Slice) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_Slice.Merge(m, src)
}
func (m *SendMsg_Slice) XXX_Size() int {
return xxx_messageInfo_SendMsg_Slice.Size(m)
}
func (m *SendMsg_Slice) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_Slice.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_Slice proto.InternalMessageInfo
func (m *SendMsg_Slice) GetNodes() []string {
if m != nil {
return m.Nodes
}
return nil
}
type SendMsg_State struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
VotedFor []string `protobuf:"bytes,2,rep,name=votedFor,proto3" json:"votedFor,omitempty"`
Accepted []string `protobuf:"bytes,3,rep,name=accepted,proto3" json:"accepted,omitempty"`
Confirmed []string `protobuf:"bytes,4,rep,name=confirmed,proto3" json:"confirmed,omitempty"`
QuorumSlices []*SendMsg_Slice `protobuf:"bytes,5,rep,name=quorumSlices,proto3" json:"quorumSlices,omitempty"`
Counter int32 `protobuf:"varint,6,opt,name=counter,proto3" json:"counter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_State) Reset() { *m = SendMsg_State{} }
func (m *SendMsg_State) String() string { return proto.CompactTextString(m) }
func (*SendMsg_State) ProtoMessage() {}
func (*SendMsg_State) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 1}
}
func (m *SendMsg_State) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_State.Unmarshal(m, b)
}
func (m *SendMsg_State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_State.Marshal(b, m, deterministic)
}
func (m *SendMsg_State) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_State.Merge(m, src)
}
func (m *SendMsg_State) XXX_Size() int {
return xxx_messageInfo_SendMsg_State.Size(m)
}
func (m *SendMsg_State) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_State.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_State proto.InternalMessageInfo
func (m *SendMsg_State) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *SendMsg_State) GetVotedFor() []string {
if m != nil {
return m.VotedFor
}
return nil
}
func (m *SendMsg_State) GetAccepted() []string {
if m != nil {
return m.Accepted
}
return nil
}
func (m *SendMsg_State) GetConfirmed() []string {
if m != nil {
return m.Confirmed
}
return nil
}
func (m *SendMsg_State) GetQuorumSlices() []*SendMsg_Slice {
if m != nil {
return m.QuorumSlices
}
return nil
}
func (m *SendMsg_State) GetCounter() int32 {
if m != nil {
return m.Counter
}
return 0
}
type EmptyMessage struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EmptyMessage) Reset() { *m = EmptyMessage{} }
func (m *EmptyMessage) String() string { return proto.CompactTextString(m) }
func (*EmptyMessage) ProtoMessage() {}
func (*EmptyMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{1}
}
func (m *EmptyMessage) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EmptyMessage.Unmarshal(m, b)
}
func (m *EmptyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EmptyMessage.Marshal(b, m, deterministic)
}
func (m *EmptyMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_EmptyMessage.Merge(m, src)
}
func (m *EmptyMessage) XXX_Size() int {
return xxx_messageInfo_EmptyMessage.Size(m)
}
func (m *EmptyMessage) XXX_DiscardUnknown() {
xxx_messageInfo_EmptyMessage.DiscardUnknown(m)
}
var xxx_messageInfo_EmptyMessage proto.InternalMessageInfo
func init() {
proto.RegisterType((*SendMsg)(nil), "fvp.SendMsg")
proto.RegisterType((*SendMsg_Slice)(nil), "fvp.SendMsg.Slice")
proto.RegisterType((*SendMsg_State)(nil), "fvp.SendMsg.State")
proto.RegisterType((*EmptyMessage)(nil), "fvp.EmptyMessage")
}
func init() { proto.RegisterFile("fvp.proto", fileDescriptor_9e36e933c92912d0) }
var fileDescriptor_9e36e933c92912d0 = []byte{
// 272 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0x84, 0x30,
0x10, 0xc6, 0xe5, 0xef, 0xca, 0x2c, 0xd9, 0xc4, 0x89, 0x87, 0x86, 0x68, 0x42, 0x38, 0xe1, 0x85,
0xc3, 0xae, 0xf1, 0x0d, 0xf4, 0xb6, 0x17, 0x78, 0x02, 0xa4, 0xc3, 0x86, 0x28, 0x2d, 0xb6, 0x05,
0xe3, 0x7b, 0x79, 0xf7, 0xd5, 0x0c, 0xdd, 0x5d, 0xc5, 0xc4, 0x5b, 0x7f, 0xfd, 0xcd, 0xf7, 0xa5,
0x93, 0x42, 0xd4, 0x4e, 0x43, 0x31, 0x28, 0x69, 0x24, 0x7a, 0xed, 0x34, 0x64, 0x9f, 0x2e, 0xac,
0x2a, 0x12, 0x7c, 0xaf, 0x0f, 0x78, 0x0f, 0xeb, 0x17, 0x21, 0xdf, 0x45, 0x65, 0x6a, 0x43, 0x9a,
0x39, 0xa9, 0x97, 0xaf, 0xb7, 0x58, 0xcc, 0x89, 0xd3, 0x48, 0x61, 0x55, 0xb9, 0x1c, 0x43, 0x04,
0xdf, 0x90, 0xea, 0x99, 0x9b, 0x3a, 0x79, 0x50, 0xda, 0x73, 0x72, 0x0b, 0x41, 0xf5, 0xda, 0x35,
0x84, 0xd7, 0x10, 0x08, 0xc9, 0x4f, 0x65, 0x51, 0x79, 0x84, 0xe4, 0xcb, 0x81, 0xc0, 0xa6, 0x71,
0x03, 0x6e, 0xc7, 0x99, 0x93, 0x3a, 0x79, 0x54, 0xba, 0x1d, 0xc7, 0x04, 0x2e, 0x27, 0x69, 0x88,
0x3f, 0x49, 0xc5, 0x5c, 0x1b, 0xf9, 0xe1, 0xd9, 0xd5, 0x4d, 0x43, 0x83, 0x21, 0xce, 0xbc, 0xa3,
0x3b, 0x33, 0xde, 0x40, 0xd4, 0x48, 0xd1, 0x76, 0xaa, 0x27, 0xce, 0x7c, 0x2b, 0x7f, 0x2f, 0xf0,
0x01, 0xe2, 0xb7, 0x51, 0xaa, 0xb1, 0xb7, 0x8f, 0xd2, 0x2c, 0xf8, 0x6f, 0xb3, 0x59, 0x95, 0x7f,
0xe6, 0x90, 0xc1, 0xaa, 0x91, 0xa3, 0x30, 0xa4, 0x58, 0x68, 0xb7, 0x3b, 0x63, 0xb6, 0x81, 0xf8,
0xb1, 0x1f, 0xcc, 0xc7, 0x9e, 0xb4, 0xae, 0x0f, 0xb4, 0xdd, 0x41, 0x58, 0x91, 0x9a, 0x48, 0xe1,
0x1d, 0xf8, 0x73, 0x25, 0xc6, 0xcb, 0xf6, 0xe4, 0xca, 0xd2, 0x32, 0x92, 0x5d, 0x3c, 0x87, 0xf6,
0x1f, 0x76, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x29, 0x50, 0xd3, 0x94, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ServerClient is the client API for Server service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ServerClient interface {
Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error)
}
type serverClient struct {
cc *grpc.ClientConn
}
func NewServerClient(cc *grpc.ClientConn) ServerClient {
return &serverClient{cc}
}
func (c *serverClient) Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error) {
out := new(EmptyMessage)
err := c.cc.Invoke(ctx, "/fvp.Server/Send", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ServerServer is the server API for Server service.
type ServerServer interface {
Send(context.Context, *SendMsg) (*EmptyMessage, error)
}
// UnimplementedServerServer can be embedded to have forward compatible implementations.
type UnimplementedServerServer struct {
}
func (*UnimplementedServerServer) Send(ctx context.Context, req *SendMsg) (*EmptyMessage, error) {
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
}
func RegisterServerServer(s *grpc.Server, srv ServerServer) {
s.RegisterService(&_Server_serviceDesc, srv)
}
func _Server_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendMsg)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ServerServer).Send(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/fvp.Server/Send",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ServerServer).Send(ctx, req.(*SendMsg))
}
return interceptor(ctx, in, info, handler)
}
| Methods: []grpc.MethodDesc{
{
MethodName: "Send",
Handler: _Server_Send_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "fvp.proto",
} | var _Server_serviceDesc = grpc.ServiceDesc{
ServiceName: "fvp.Server",
HandlerType: (*ServerServer)(nil), | random_line_split |
fvp.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: fvp.proto
package fvp
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type SendMsg struct {
KnownStates []*SendMsg_State `protobuf:"bytes,1,rep,name=knownStates,proto3" json:"knownStates,omitempty"`
Term int32 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg) Reset() { *m = SendMsg{} }
func (m *SendMsg) String() string { return proto.CompactTextString(m) }
func (*SendMsg) ProtoMessage() {}
func (*SendMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0}
}
func (m *SendMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg.Unmarshal(m, b)
}
func (m *SendMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg.Marshal(b, m, deterministic)
}
func (m *SendMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg.Merge(m, src)
}
func (m *SendMsg) XXX_Size() int {
return xxx_messageInfo_SendMsg.Size(m)
}
func (m *SendMsg) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg proto.InternalMessageInfo
func (m *SendMsg) GetKnownStates() []*SendMsg_State {
if m != nil {
return m.KnownStates
}
return nil
}
func (m *SendMsg) GetTerm() int32 {
if m != nil {
return m.Term
}
return 0
}
type SendMsg_Slice struct {
Nodes []string `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_Slice) Reset() { *m = SendMsg_Slice{} }
func (m *SendMsg_Slice) String() string { return proto.CompactTextString(m) }
func (*SendMsg_Slice) ProtoMessage() {}
func (*SendMsg_Slice) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 0}
}
func (m *SendMsg_Slice) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_Slice.Unmarshal(m, b)
}
func (m *SendMsg_Slice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_Slice.Marshal(b, m, deterministic)
}
func (m *SendMsg_Slice) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_Slice.Merge(m, src)
}
func (m *SendMsg_Slice) XXX_Size() int {
return xxx_messageInfo_SendMsg_Slice.Size(m)
}
func (m *SendMsg_Slice) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_Slice.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_Slice proto.InternalMessageInfo
func (m *SendMsg_Slice) GetNodes() []string {
if m != nil {
return m.Nodes
}
return nil
}
type SendMsg_State struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
VotedFor []string `protobuf:"bytes,2,rep,name=votedFor,proto3" json:"votedFor,omitempty"`
Accepted []string `protobuf:"bytes,3,rep,name=accepted,proto3" json:"accepted,omitempty"`
Confirmed []string `protobuf:"bytes,4,rep,name=confirmed,proto3" json:"confirmed,omitempty"`
QuorumSlices []*SendMsg_Slice `protobuf:"bytes,5,rep,name=quorumSlices,proto3" json:"quorumSlices,omitempty"`
Counter int32 `protobuf:"varint,6,opt,name=counter,proto3" json:"counter,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SendMsg_State) Reset() { *m = SendMsg_State{} }
func (m *SendMsg_State) String() string { return proto.CompactTextString(m) }
func (*SendMsg_State) ProtoMessage() {}
func (*SendMsg_State) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{0, 1}
}
func (m *SendMsg_State) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SendMsg_State.Unmarshal(m, b)
}
func (m *SendMsg_State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SendMsg_State.Marshal(b, m, deterministic)
}
func (m *SendMsg_State) XXX_Merge(src proto.Message) {
xxx_messageInfo_SendMsg_State.Merge(m, src)
}
func (m *SendMsg_State) XXX_Size() int {
return xxx_messageInfo_SendMsg_State.Size(m)
}
func (m *SendMsg_State) XXX_DiscardUnknown() {
xxx_messageInfo_SendMsg_State.DiscardUnknown(m)
}
var xxx_messageInfo_SendMsg_State proto.InternalMessageInfo
func (m *SendMsg_State) GetId() string {
if m != nil |
return ""
}
func (m *SendMsg_State) GetVotedFor() []string {
if m != nil {
return m.VotedFor
}
return nil
}
func (m *SendMsg_State) GetAccepted() []string {
if m != nil {
return m.Accepted
}
return nil
}
func (m *SendMsg_State) GetConfirmed() []string {
if m != nil {
return m.Confirmed
}
return nil
}
func (m *SendMsg_State) GetQuorumSlices() []*SendMsg_Slice {
if m != nil {
return m.QuorumSlices
}
return nil
}
func (m *SendMsg_State) GetCounter() int32 {
if m != nil {
return m.Counter
}
return 0
}
type EmptyMessage struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EmptyMessage) Reset() { *m = EmptyMessage{} }
func (m *EmptyMessage) String() string { return proto.CompactTextString(m) }
func (*EmptyMessage) ProtoMessage() {}
func (*EmptyMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_9e36e933c92912d0, []int{1}
}
func (m *EmptyMessage) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EmptyMessage.Unmarshal(m, b)
}
func (m *EmptyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EmptyMessage.Marshal(b, m, deterministic)
}
func (m *EmptyMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_EmptyMessage.Merge(m, src)
}
func (m *EmptyMessage) XXX_Size() int {
return xxx_messageInfo_EmptyMessage.Size(m)
}
func (m *EmptyMessage) XXX_DiscardUnknown() {
xxx_messageInfo_EmptyMessage.DiscardUnknown(m)
}
var xxx_messageInfo_EmptyMessage proto.InternalMessageInfo
func init() {
proto.RegisterType((*SendMsg)(nil), "fvp.SendMsg")
proto.RegisterType((*SendMsg_Slice)(nil), "fvp.SendMsg.Slice")
proto.RegisterType((*SendMsg_State)(nil), "fvp.SendMsg.State")
proto.RegisterType((*EmptyMessage)(nil), "fvp.EmptyMessage")
}
func init() { proto.RegisterFile("fvp.proto", fileDescriptor_9e36e933c92912d0) }
var fileDescriptor_9e36e933c92912d0 = []byte{
// 272 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0x84, 0x30,
0x10, 0xc6, 0xe5, 0xef, 0xca, 0x2c, 0xd9, 0xc4, 0x89, 0x87, 0x86, 0x68, 0x42, 0x38, 0xe1, 0x85,
0xc3, 0xae, 0xf1, 0x0d, 0xf4, 0xb6, 0x17, 0x78, 0x02, 0xa4, 0xc3, 0x86, 0x28, 0x2d, 0xb6, 0x05,
0xe3, 0x7b, 0x79, 0xf7, 0xd5, 0x0c, 0xdd, 0x5d, 0xc5, 0xc4, 0x5b, 0x7f, 0xfd, 0xcd, 0xf7, 0xa5,
0x93, 0x42, 0xd4, 0x4e, 0x43, 0x31, 0x28, 0x69, 0x24, 0x7a, 0xed, 0x34, 0x64, 0x9f, 0x2e, 0xac,
0x2a, 0x12, 0x7c, 0xaf, 0x0f, 0x78, 0x0f, 0xeb, 0x17, 0x21, 0xdf, 0x45, 0x65, 0x6a, 0x43, 0x9a,
0x39, 0xa9, 0x97, 0xaf, 0xb7, 0x58, 0xcc, 0x89, 0xd3, 0x48, 0x61, 0x55, 0xb9, 0x1c, 0x43, 0x04,
0xdf, 0x90, 0xea, 0x99, 0x9b, 0x3a, 0x79, 0x50, 0xda, 0x73, 0x72, 0x0b, 0x41, 0xf5, 0xda, 0x35,
0x84, 0xd7, 0x10, 0x08, 0xc9, 0x4f, 0x65, 0x51, 0x79, 0x84, 0xe4, 0xcb, 0x81, 0xc0, 0xa6, 0x71,
0x03, 0x6e, 0xc7, 0x99, 0x93, 0x3a, 0x79, 0x54, 0xba, 0x1d, 0xc7, 0x04, 0x2e, 0x27, 0x69, 0x88,
0x3f, 0x49, 0xc5, 0x5c, 0x1b, 0xf9, 0xe1, 0xd9, 0xd5, 0x4d, 0x43, 0x83, 0x21, 0xce, 0xbc, 0xa3,
0x3b, 0x33, 0xde, 0x40, 0xd4, 0x48, 0xd1, 0x76, 0xaa, 0x27, 0xce, 0x7c, 0x2b, 0x7f, 0x2f, 0xf0,
0x01, 0xe2, 0xb7, 0x51, 0xaa, 0xb1, 0xb7, 0x8f, 0xd2, 0x2c, 0xf8, 0x6f, 0xb3, 0x59, 0x95, 0x7f,
0xe6, 0x90, 0xc1, 0xaa, 0x91, 0xa3, 0x30, 0xa4, 0x58, 0x68, 0xb7, 0x3b, 0x63, 0xb6, 0x81, 0xf8,
0xb1, 0x1f, 0xcc, 0xc7, 0x9e, 0xb4, 0xae, 0x0f, 0xb4, 0xdd, 0x41, 0x58, 0x91, 0x9a, 0x48, 0xe1,
0x1d, 0xf8, 0x73, 0x25, 0xc6, 0xcb, 0xf6, 0xe4, 0xca, 0xd2, 0x32, 0x92, 0x5d, 0x3c, 0x87, 0xf6,
0x1f, 0x76, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x29, 0x50, 0xd3, 0x94, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ServerClient is the client API for Server service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ServerClient interface {
Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error)
}
type serverClient struct {
cc *grpc.ClientConn
}
func NewServerClient(cc *grpc.ClientConn) ServerClient {
return &serverClient{cc}
}
func (c *serverClient) Send(ctx context.Context, in *SendMsg, opts ...grpc.CallOption) (*EmptyMessage, error) {
out := new(EmptyMessage)
err := c.cc.Invoke(ctx, "/fvp.Server/Send", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ServerServer is the server API for Server service.
type ServerServer interface {
Send(context.Context, *SendMsg) (*EmptyMessage, error)
}
// UnimplementedServerServer can be embedded to have forward compatible implementations.
type UnimplementedServerServer struct {
}
func (*UnimplementedServerServer) Send(ctx context.Context, req *SendMsg) (*EmptyMessage, error) {
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
}
func RegisterServerServer(s *grpc.Server, srv ServerServer) {
s.RegisterService(&_Server_serviceDesc, srv)
}
func _Server_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendMsg)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ServerServer).Send(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/fvp.Server/Send",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ServerServer).Send(ctx, req.(*SendMsg))
}
return interceptor(ctx, in, info, handler)
}
var _Server_serviceDesc = grpc.ServiceDesc{
ServiceName: "fvp.Server",
HandlerType: (*ServerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Send",
Handler: _Server_Send_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "fvp.proto",
}
| {
return m.Id
} | conditional_block |
cli.py | """! \file
Utilities for command line interfaces.
The default interface can be set up using isle.cli.init().
More control is available through the lower level functions.
"""
from abc import ABCMeta, abstractmethod
import argparse
import contextlib
import logging
from pathlib import Path
import random
import shutil
import sys
import time
import isle
# the active progress bar
# Yes, yes a global variable but we need singletons here!
_activeBar = None
## Unicode ellipsis string.
ELLIPSIS = "…"
########################################################################
# General stuff
#
def terminalWidth():
"""!
Return the current number of columns of the terminal.
\note This does not give the proper size if `sys.stdout` is
redirected to a file.
"""
return shutil.get_terminal_size().columns
def stderrConnectedToTerm():
"""!Return True if stderr is connected to a terminal, False otherwise."""
return sys.stderr.isatty()
########################################################################
# Progress bars
#
class ETA:
"""!
Estimate the time of arrival for iterations.
The ETA is computed from a linear regression to the starting time
and current time (time at execution of the __call__ method).
This is not very stable for strongly changing durations of individual iterations
but gives a good estimate for mostly stable durations.
The start time is only set after the first iteration
(calling __call__ with current > 0).
This is because the first iteration might perform some expensive setup
operations the following iterations do not have to repeat.
In such a case, using the time at current=0 would introduce a bias
to the ETA.
Given an initial iteration xi which was started at time ti
and a current iteration xc and current time tc
the estimated final time (ETA) is
tf = mc * (xf - xi) + ti
where
mc = (tc - ti) / (xc - xi).
"""
def __init__(self, target):
"""!Initialize with a given target iteration number."""
if target <= 1:
raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
self.targetIteration = target
self._ti = None # initial time
self._xi = None # iteration when initial time was measured
def __call__(self, current):
r"""!
Estimate the time of arrival given a current iteration.
\param current Iteration number the loop is currently at.
\returns - Estimated time of arrival in seconds since epoch.
- `None` if no starting time has been set yet or `current`
is below or equal to starting iteration.
"""
# can't estimate time yet
if self._ti is None:
# initial time is time after first iteration or later
if current > 0:
self._ti = time.time()
self._xi = current
return None
# The method might be called multiple times during the same iteration.
# But cannot estimate while still at initial iteration (xi).
if current <= self._xi:
return None
# do linear regression
tc = time.time()
return (tc-self._ti)/(current-self._xi) * (self.targetIteration-self._xi) + self._ti
def reset(self, target):
"""!Re-initialize with a given target iteration number."""
if target <= 1:
raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
self.targetIteration = target
self._ti = None # initial time
self._xi = None # iteration when initial time was measured
class Progressbar(metaclass=ABCMeta):
r"""!
Abstract base class for progress bars.
\warning Any and all terminal output must be made through TerminalProgressbar.write()
while a progress bar is active!
Otherwise it will interfere with the progress bar and might get erased
when the bar is redrawn.
When set up correctly (via `setupLogging()`), loggers handle output properly.
"""
def __init__(self, message=""):
r"""!
Construct a new base progress bar.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param message A string that is displayed in front of the actual bar
and realted information.
"""
self._message = message
self._startTime = time.time()
@abstractmethod
def advance(self, amount=1):
"""!Advance the bar by a given amount, redraws the bar."""
raise NotImplementedError()
@abstractmethod
def clear(self):
"""!Clear the current line of output."""
raise NotImplementedError()
@abstractmethod
def redraw(self):
"""!Clear the current bar and draw a new one."""
raise NotImplementedError()
@abstractmethod
def draw(self):
"""!Draw the bar."""
raise NotImplementedError()
def write(self, msg):
r"""!
Write a message to the terminal.
\attention Use exclusively this function to write to the terminal
while a progress bar is being displayed.
"""
sys.stderr.write(msg)
def finalize(self):
"""!Remove bar from screen and show a message showing the run time."""
sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
"at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
class TerminalProgressbar(Progressbar):
r"""!
A progress bar that is shown in the terminal via sys.stdio.
Needs to modify the current line of output in order to animate the progress bar.
This is only possible when the output is indeed connected to a terminal.
If sys.stderr is piped into a file, this class cannot operate properly.
\warning Any and all terminal output must be made through TerminalProgressbar.write()
while a progress bar is active!
Otherwise it will interfere with the progress bar and might get erased
when the bar is redrawn.
When set up correctly (via `setupLogging()`), loggers handle output properly.
"""
# Escape sequence to clear the current line right of the cursor.
_CLEAR = "[K"
# Escape sequence to move the cursor to the beginning of the current line.
_FRONT = "[G"
class _FillingBar:
"""!A bar that fills up over time approaching a target."""
def __init__(self, length, filledChar, emptyChar):
self.length = length
self._filledChar = filledChar
self._emptyChar = emptyChar
def construct(self, current, target):
"""!Construct a string representing the bar from a current and target fill status."""
nfilled = int(current / target * self.length)
return self._filledChar*nfilled + self._emptyChar*(self.length-nfilled)
class _OscillatingBar:
"""!A 'bar' that oscillates randomly for cases where no target is known."""
# element [i][j] transitions from height i to height j
_PIECES = [["⠤", "⠴", "⠼"],
["⠲", "⠒", "⠚"],
["⠹", "⠙", "⠉"]]
# extended 4-dot pieces, looks a bit odd because the lower most dots are very low
# _PIECES = [
# ["⣀", "⣠", "⣰", "⣸"],
# ["⢤", "⠤", "⠴", "⠼"],
# ["⢲", "⠲", "⠒", "⠚"],
# ["⢹", "⠹", "⠙", "⠉"]
# ]
def __init__(self, length):
self.length = length
self._rng = random.Random()
self._currentHeight = 1
self._barStr = self._PIECES[self._currentHeight][self._currentHeight]*length
def construct(self, _current, _target):
"""!Construct a string representing the bar, arguments are ignored."""
h = self._rng.randint(0, 2) # new height
# shift by one and add new element
self._barStr = self._barStr[1:]+self._PIECES[self._currentHeight][h]
self._currentHeight = h
return self._barStr
def __init__(self, target, message="", barLength=40,
barChar="#", emptyChar="-"):
r"""!
Construct a new progress bar.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
May be `None` in which case the bar indicates only
general progress without showing how far away the goal is.
\param message A string that is displayed in front of the actual bar
and realted information.
\param barLength Number of characters the bar itself occupies in the terminal.
Does not include ETA and iteration counter.
\param barChar Single character to use for the filled portion of the bar.
\param emptyChar Single character to use for the not yet filled portion of the bar.
"""
super().__init__(message)
self._target = target
self._current = 0
# ETA's __init__ makes sure that target > 0
self._eta = ETA(target) if target else None
self._bar = self._FillingBar(barLength, barChar, emptyChar) \
if target \
else self._OscillatingBar(barLength)
# format string for text after bar
if target:
targetStr = f"{target:d}"
self._postFmt = "] ({:"+str(len(targetStr))+"d}/"+targetStr+") "
else:
self._postFmt = "] ({:3d}/?)"
def advance(self, amount=1):
"""!Advance the bar by a given amount, redraws the bar."""
self._current += amount
self.redraw()
def clear(self):
"""!Clear the current line of output."""
sys.stderr.write(self._FRONT+self._CLEAR)
def redraw(self):
"""!Clear the current bar and draw a new one."""
# enough to go to front, don't need to clear the line
sys.stderr.write(self._FRONT)
self.draw()
def draw(self):
"""!Draw the bar into the terminal at the current cursor position."""
# format string before bar
if self._eta:
eta = self._eta(self._current)
pre = " ETA: " \
+ (time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??") \
+ " ["
else:
pre = " ["
# format string after bar
post = self._postFmt.format(self._current)
# current total length of a line in the terminal
lineLength = terminalWidth()
# length available for messages
availLength = lineLength - len(pre) - len(post) - self._bar.length
if availLength < 10:
# not enough space to display everything, only show message
out = self._message[:lineLength-4]+" ["+ELLIPSIS+"]"
else:
# add spaces after message or abbreviate message depending on availLength
spaceLength = availLength - len(self._message)
msg = self._message+" "*spaceLength \
if spaceLength >= 0 \
else self._message[:availLength-1]+ELLIPSIS
# construct the full output string
out = msg+pre+self._bar.construct(self._current, self._target)+post
sys.stderr.write(out)
sys.stderr.flush()
def write(self, msg):
r"""!
Write a message to the terminal.
Clears the current progress bar on screen, writes the message,
appends a newline if needed and redraws the bar.
\attention Use exclusively this function to write to the terminal
while a progress bar is being displayed.
"""
self.clear()
if not msg.endswith("\n"):
sys.stderr.write(msg+"\n")
else:
sys.stderr.write(msg)
self.draw()
def finalize(self):
"""!Remove bar from screen and print a message showing the run time."""
self.clear()
sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
"at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
class FileProgressbar(Progressbar):
r"""!
A progress indicator that writes individual update messages.
This is not really a progress 'bar' as it only prints simple messages
indicating progress, not an animated bar.
Is still writes to `sys.stderr` though, not directly to a file.
Use this class if `sys.stderr` is not connected to a terminal.
\warning Even though normal output does not interfere with this progress bar,
it is still better to use FileProgressbar.write() instead of
plain `print()` for uniformity with TerminalProgressbar.
When set up correctly (via `setupLogging()`), loggers handle output properly.
"""
def __init__(self, target, message="", updateRate=1):
r"""!
Construct a new progress bar.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
May be `None` in which case the bar indicates only
general progress without showing how far away the goal is.
\param message A string that is displayed in front of the actual bar
and realted information.
\param updateRate The bar is only redrawn after updateRate number of steps.
"""
super().__init__(message)
self._target = target
self._updateRate = updateRate
self._current = 0
self._lastUpdated = -updateRate
# ETA's __init__ makes sure that target > 0
self._eta = ETA(target) if target else None
# format string for a counter
if target:
targetStr = f"{target:d}"
self._counterFmt = " ({:"+str(len(targetStr))+"d}/"+targetStr+") "
else:
self._counterFmt = " ({:3d}/?)"
def advance(self, amount=1):
"""!Advance the bar by a given amount, redraws the bar."""
self._current += amount
if self._current - self._updateRate >= self._lastUpdated:
self.redraw()
# go to nearest multiple of updateRate less than current
self._lastUpdated = (self._current // self._updateRate)*self._updateRate
def clear(self):
"""!Do nothing, cannot easily erase content from files."""
def redraw(self):
"""!Just call draw()."""
self.draw()
def draw(self):
"""!Print progress information."""
# format progress indication string
if self._eta:
eta = self._eta(self._current)
progStr = " ETA: " \
+ (time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??")
else:
progStr = ""
# format string after bar
progStr += self._counterFmt.format(self._current)
sys.stderr.write(self._message+progStr+"\n")
sys.stderr.flush()
def write(self, msg):
r"""!
Write out a message.
Just redirects to sys.stderr.
\attention Use exclusively this function to write to the terminal
while a progress bar is being displayed.
"""
sys.stderr.write(msg)
def finalize(self):
"""!Print a message showing the run time."""
sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
"at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
def makeProgressbar(target, message="", updateRate=1):
r"""!
Construct a Progressbar.
Selects either TerminalProgressbar or FilePRogressbar
depending on whether `sys.stderr` is connected to a terminal
or not.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
\param message String to display with the progressbar.
\param updateRate The bar is only redrawn after updateRate number of steps.
Only used when writing to file.
\returns A new Progressbar instance.
"""
if stderrConnectedToTerm():
return TerminalProgressbar(target, message)
return FileProgressbar(target, message, updateRate)
@contextlib.contextmanager
def trackProgress(target, message="", updateRate=1):
r"""!
A context manager to track progress of an operation via a progress bar.
Sets up and returns a new progress bar.
The caller needs to advance that bar themselves.
\param target Target number of steps to track.
Can be `None` in which case the bar only indicates that something,
happens, not how far away the goal is.
\param message Message to display in front of the progress bar.
\returns A newly constructed instance of Progressbar.
\throws RuntimeError is a progress bar is already active.
"""
global _activeBar
if _activeBar is not None:
logging.getLogger(__name__).error("Cannot construct a new progress bar, "
"another one is already active.")
raise RuntimeError("A progress bar is already active.")
try:
_activeBar = makeProgressbar(target, message, updateRate)
yield _activeBar
_activeBar.finalize() # success => clean up
except:
# failure => leave bar visible and advance a line
sys.stderr.write("\n")
raise
finally:
# in any case the bar is now done
_activeBar = None
def progressRange(start, stop=None, step=1, message="", updateRate=1):
r"""!
Like built in `range()` but indicates progress using a Progressbar.
Parameters `start`, `stop`, and `step` behave the same way as for
the built in `range()` generator.
`message` is displayed in front of the progress bar.
"""
# mimic behavior of built in range
if stop is None:
stop = start
start = 0
with trackProgress(stop-start, message, updateRate) as pbar:
for cur in range(start, stop, step):
yield cur
# advance only up to stop
# If stop-start is not a multiple of step, advance(step) would overshoot.
pbar.advance(min(step, stop-cur))
########################################################################
# Logging
#
class ProgressStream:
"""!
A barbones output stream that writes to `sys.stderr` or
directs the output through a progress bar if one is active.
"""
def write(self, msg):
"""!Write msg to `sys.stderr`."""
if _activeBar is not None:
# The c++ logger sends spurious empty lines,
# just gobble them up.
if msg.strip():
_activeBar.write(msg)
else:
sys.stderr.write(msg)
class ColorFormatter(logging.Formatter):
"""!
A logging formatter that uses colors for loglevel and logger name.
Colors are encoded using ANSI escape sequences. If they are not supported,
the output shows extra characters. You should therefore pick a different
formatter if you write to file.
"""
## Colorized level names.
LEVELNAMES = {
logging.DEBUG: "[94mDEBUG[0m",
logging.INFO: "INFO",
logging.WARNING: "[33mWARNING[0m",
logging.ERROR: "[31mERROR[0m",
logging.CRITICAL: "[91mCRITICAL[0m",
}
def format(self, record):
"!Format a record using colors."
# Hack the colors into the record itself and let super do the heavy lifting.
record.levelname = self.LEVELNAMES[record.levelno]
record.name = f"[1m{record.name}[0m"
return super().format(record)
def _suppressGoogleLogWarning():
"""
Suppress warning emitted by absl.logging
'WARNING: Logging before flag parsing goes to stderr.'
Does nothing if abseil-py is not installed.
"""
try:
# Tensorflow uses Google's abseil-py library, which uses a Google-specific
# wrapper for logging. That wrapper will write a warning to sys.stderr if
# the Google command-line flags library has not been initialized.
#
# https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825
#
# We don't want this here because we have our own logging setup.
import absl.logging
# https://github.com/abseil/abseil-py/issues/99
logging.root.removeHandler(absl.logging._absl_handler)
# https://github.com/abseil/abseil-py/issues/102
absl.logging._warn_preinit_stderr = False
except Exception:
pass
def setupLogging(logfile=None, verbosity=0):
r"""!
Set up Python's logging framework.
The root logger is set up to output to terminal and file.
The former shows colored output if `sys.stderr` is connected to a terminal
and is aware of Progressbar.
\param logfile Write log to this file.
If `None`, no file output is performed.
\param verbosity Set logging level.
The minimum level for each handler is
`verbosity` | terminal | file
----------- | -------- | ----
0 | WARNING | INFO
1 | INFO | INFO
2 | DEBUG | DEBUG
\throws RuntimeError if this function is called more than once.
It is safe to discard this exception,
logging will still be set up properly.
"""
if setupLogging.isSetUp:
logging.getLogger(__name__).error("Called setupLogging a second time."
"This function must be called *exactly* once.")
raise RuntimeError("Logging already set up")
_suppressGoogleLogWarning()
if verbosity > 2:
# can't be any noisier than that
verbosity = 2
# need at least this level so all messages get out
minLoglevel = logging.DEBUG if verbosity == 2 else logging.INFO
# No need for keeping track of threads in Python.
# In C++, all bets are off.
logging.logThreads = 0
# configure the root logger
logger = logging.getLogger("")
logger.setLevel(minLoglevel)
if logfile:
# output to file at least at level INFO and w/o colors
fh = logging.FileHandler(logfile, "w")
fh.setLevel(minLoglevel)
fh.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%Y-%m-%d %H:%M:%S"))
logger.addHandler(fh)
# and output to STDERR based on verbosity and possibly w/ colors
ch = logging.StreamHandler(stream=ProgressStream())
ch.setLevel((logging.WARNING, logging.INFO, logging.DEBUG)[verbosity])
if stderrConnectedToTerm():
ch.setFormatter(ColorFormatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
else:
ch.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
logger.addHandler(ch)
# done => Never run this code again!
setupLogging.isSetUp = True
setupLogging.isSetUp = False
########################################################################
# Argument parsing
#
def makeDefaultParser(defaultLog="none", **kwargs):
r"""!
Construct and return a new argument parser with the default arguments.
See isle.cli.addDefaultArgs().
\param defaultLog Default log file in case user does not supply --log argument.
\param **kwargs Passed to constructor of ArgumentParser.
"""
return addDefaultArgs(argparse.ArgumentParser(**kwargs), defaultLog)
def addDefaultArgs(parser, defaultLog="none"):
"""!Add default arguments common to all commands."""
parser.add_argument("--version", action="version",
version=f"Isle {isle.__version__}")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Make output more verbose, stacks.")
parser.add_argument("--log", default=defaultLog,
help="Specify log file name. Set to none to not write log file.")
return parser
def addContinueArgs(parser):
"""!Add arguments for continuation run to parser."""
parser.add_argument("infile", help="Input file.", type=Path)
parser.add_argument("-o", "--output", help="Output file",
type=Path, dest="outfile")
parser.add_argument("-i", "--initial", type=int, default=-1,
help="Initial checkpoint for HMC")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing output files")
parser.add_argument("-s", "--save-freq", type=int, default=None,
help="Save configurations every s trajectories, "
"computed from infile by default")
parser.add_argument("-c", "--checkpoint-freq", type=int, default=None,
help="Save checkpoints every c trajectories, "
"computed from infile by default")
requiredGrp = parser.add_argument_group("required named arguments")
requiredGrp.add_argument("-n", "--ntrajectories", type=int, required=True,
help="Number of trajectories to produce")
return parser
def addMeasArgs(parser):
"""!Add arguments for measurements to parser."""
parser.add_argument("infile", help="Input file", type=Path)
parser.add_argument("-o", "--output", help="Output file", type=Path, dest="outfile")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing output file.")
return parser
def addShowArgs(parser):
"""!Add arguments for reporting to parser."""
reporters = ["overview", "lattice", "correlator", "tuning"]
class _ReportAction(argparse.Action):
"""!custom action to parse reporters."""
def | , nargs="+", type=Path)
parser.add_argument("-r", "--report", action=_ReportAction, metavar="", default=["overview"],
help="Comma separated list of reporters to use. Allowed values are ["
+",".join(reporters)+",all] Defaults to overview.")
return parser
def _makeParser(argParser, **kwargs):
"""!Make an argument parser from given command name and keyword arguments."""
cmdArgMap = {"continue": addContinueArgs,
"meas": addMeasArgs,
"show": addShowArgs,
"default": lambda parser: parser}
defaultLog = {"continue": "isle.hmc.log",
"meas": "isle.meas.log",
"show": "none",
"default": "isle.log"}
if not argParser in cmdArgMap:
# this is pre logging setup => do it the ugly way
print(f"Error: requested argParser name not supported: {argParser}")
raise ValueError("Error: requested argParser name not supported")
return cmdArgMap[argParser](makeDefaultParser(defaultLog=defaultLog[argParser], **kwargs))
########################################################################
# The one function to control all the rest.
#
def init(argParser="default", defaultLog=None, verbosity=0, **kwargs):
r"""!
Initialize command line interface.
This function must be called before any I/O as it sets up the logging framework.
\param argParser Command line argument parser. Can be
- `argparse.ArgumentParser`: Use this parser as is.
- `str`: Construct a parser based on this command name.
See `add*Args` functions.
- `None`: Don't parse any command line arguments.
Log file and verbosity are set to the values provided in
corresponding function parameters.
\param defaultLog Log file to use if `argParser is None`.
\param verbosity Output verbisity level to use if `argParser is None`.
\param **kwargs Passed to isle.cli.makeDefaultParser() if `argParser` is a string.
\returns Parsed arguments.
"""
if argParser is not None:
if isinstance(argParser, str):
# construct new parser based on command name
args = _makeParser(argParser, **kwargs).parse_args()
else:
# use provided parser
args = argParser.parse_args()
defaultLog = None if not hasattr(args, "log") or args.log.lower() == "none" else args.log
verbosity = args.verbose if hasattr(args, "verbose") else 0
else:
# don't parse anything, use default values
args = None
setupLogging(defaultLog, verbosity=verbosity)
return args
| __call__(self, parser, namespace, values, option_string=None):
if "all" in values:
setattr(namespace, self.dest, reporters)
else:
setattr(namespace, self.dest, values.split(","))
parser.add_argument("input", help="Input file" | identifier_body |
cli.py | """! \file
Utilities for command line interfaces.
The default interface can be set up using isle.cli.init().
More control is available through the lower level functions.
"""
from abc import ABCMeta, abstractmethod
import argparse
import contextlib
import logging
from pathlib import Path
import random
import shutil
import sys
import time
import isle
# the active progress bar
# Yes, yes a global variable but we need singletons here!
_activeBar = None
## Unicode ellipsis string.
ELLIPSIS = "…"
########################################################################
# General stuff
#
def terminalWidth():
"""!
Return the current number of columns of the terminal.
\note This does not give the proper size if `sys.stdout` is
redirected to a file.
"""
return shutil.get_terminal_size().columns
def stderrConnectedToTerm():
"""!Return True if stderr is connected to a terminal, False otherwise."""
return sys.stderr.isatty()
########################################################################
# Progress bars
#
class ETA:
"""!
Estimate the time of arrival for iterations.
The ETA is computed from a linear regression to the starting time
and current time (time at execution of the __call__ method).
This is not very stable for strongly changing durations of individual iterations
but gives a good estimate for mostly stable durations.
The start time is only set after the first iteration
(calling __call__ with current > 0).
This is because the first iteration might perform some expensive setup
operations the following iterations do not have to repeat.
In such a case, using the time at current=0 would introduce a bias
to the ETA.
Given an initial iteration xi which was started at time ti
and a current iteration xc and current time tc
the estimated final time (ETA) is
tf = mc * (xf - xi) + ti
where
mc = (tc - ti) / (xc - xi).
"""
def __init__(self, target):
"""!Initialize with a given target iteration number."""
if target <= 1:
raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
self.targetIteration = target
self._ti = None # initial time
self._xi = None # iteration when initial time was measured
def __call__(self, current):
r"""!
Estimate the time of arrival given a current iteration.
\param current Iteration number the loop is currently at.
\returns - Estimated time of arrival in seconds since epoch.
- `None` if no starting time has been set yet or `current`
is below or equal to starting iteration.
"""
# can't estimate time yet
if self._ti is None:
# initial time is time after first iteration or later
if current > 0:
self._ti = time.time()
self._xi = current
return None
# The method might be called multiple times during the same iteration.
# But cannot estimate while still at initial iteration (xi).
if current <= self._xi:
return None
# do linear regression
tc = time.time()
return (tc-self._ti)/(current-self._xi) * (self.targetIteration-self._xi) + self._ti
def reset(self, target):
"""!Re-initialize with a given target iteration number."""
if target <= 1:
raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
self.targetIteration = target
self._ti = None # initial time
self._xi = None # iteration when initial time was measured
class Progressbar(metaclass=ABCMeta):
r"""!
Abstract base class for progress bars.
\warning Any and all terminal output must be made through TerminalProgressbar.write()
while a progress bar is active!
Otherwise it will interfere with the progress bar and might get erased
when the bar is redrawn.
When set up correctly (via `setupLogging()`), loggers handle output properly.
"""
def __init__(self, message=""):
r"""!
Construct a new base progress bar.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param message A string that is displayed in front of the actual bar
and realted information.
"""
self._message = message
self._startTime = time.time()
@abstractmethod
def advance(self, amount=1):
"""!Advance the bar by a given amount, redraws the bar."""
raise NotImplementedError()
@abstractmethod
def clear(self):
"""!Clear the current line of output."""
raise NotImplementedError()
@abstractmethod
def redraw(self):
"""!Clear the current bar and draw a new one."""
raise NotImplementedError()
@abstractmethod
def draw(self):
"""!Draw the bar."""
raise NotImplementedError()
def write(self, msg):
r"""!
Write a message to the terminal.
\attention Use exclusively this function to write to the terminal
while a progress bar is being displayed.
"""
sys.stderr.write(msg)
def finalize(self):
"""!Remove bar from screen and show a message showing the run time."""
sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
"at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
class TerminalProgressbar(Progressbar):
r"""!
A progress bar that is shown in the terminal via sys.stdio.
Needs to modify the current line of output in order to animate the progress bar.
This is only possible when the output is indeed connected to a terminal.
If sys.stderr is piped into a file, this class cannot operate properly.
\warning Any and all terminal output must be made through TerminalProgressbar.write()
while a progress bar is active!
Otherwise it will interfere with the progress bar and might get erased
when the bar is redrawn.
When set up correctly (via `setupLogging()`), loggers handle output properly.
"""
# Escape sequence to clear the current line right of the cursor.
_CLEAR = "[K"
# Escape sequence to move the cursor to the beginning of the current line.
_FRONT = "[G"
class _FillingBar:
"""!A bar that fills up over time approaching a target."""
def __init__(self, length, filledChar, emptyChar):
self.length = length
self._filledChar = filledChar
self._emptyChar = emptyChar
def construct(self, current, target):
"""!Construct a string representing the bar from a current and target fill status."""
nfilled = int(current / target * self.length)
return self._filledChar*nfilled + self._emptyChar*(self.length-nfilled)
class _OscillatingBar:
"""!A 'bar' that oscillates randomly for cases where no target is known."""
# element [i][j] transitions from height i to height j
_PIECES = [["⠤", "⠴", "⠼"],
["⠲", "⠒", "⠚"],
["⠹", "⠙", "⠉"]]
# extended 4-dot pieces, looks a bit odd because the lower most dots are very low
# _PIECES = [
# ["⣀", "⣠", "⣰", "⣸"],
# ["⢤", "⠤", "⠴", "⠼"],
# ["⢲", "⠲", "⠒", "⠚"],
# ["⢹", "⠹", "⠙", "⠉"]
# ]
def __init__(self, length):
self.length = length
self._rng = random.Random()
self._currentHeight = 1
self._barStr = self._PIECES[self._currentHeight][self._currentHeight]*length
def construct(self, _current, _target):
"""!Construct a string representing the bar, arguments are ignored."""
h = self._rng.randint(0, 2) # new height
# shift by one and add new element
self._barStr = self._barStr[1:]+self._PIECES[self._currentHeight][h]
self._currentHeight = h
return self._barStr
def __init__(self, target, message="", barLength=40,
barChar="#", emptyChar="-"):
r"""!
Construct a new progress bar.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
May be `None` in which case the bar indicates only
general progress without showing how far away the goal is.
\param message A string that is displayed in front of the actual bar
and realted information.
\param barLength Number of characters the bar itself occupies in the terminal.
Does not include ETA and iteration counter.
\param barChar Single character to use for the filled portion of the bar.
\param emptyChar Single character to use for the not yet filled portion of the bar.
"""
super().__init__(message)
self._target = target
self._current = 0
# ETA's __init__ makes sure that target > 0
self._eta = ETA(target) if target else None
self._bar = self._FillingBar(barLength, barChar, emptyChar) \
if target \
else self._OscillatingBar(barLength)
# format string for text after bar
if target:
targetStr = f"{target:d}"
self._postFmt = "] ({:"+str(len(targetStr))+"d}/"+targetStr+") "
else:
self._postFmt = "] ({:3d}/?)"
def advance(self, amount=1):
"""!Advance the bar by a given amount, redraws the bar."""
self._current += amount
self.redraw()
def clear(self):
"""!Clear the current line of output."""
sys.stderr.write(self._FRONT+self._CLEAR)
def redraw(self):
"""!Clear the current bar and draw a new one."""
# enough to go to front, don't need to clear the line
sys.stderr.write(self._FRONT)
self.draw()
def draw(self):
"""!Draw the bar into the terminal at the current cursor position."""
# format string before bar
if self._eta:
eta = self._eta(self._current)
pre = " ETA: " \
+ (time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??") \
+ " ["
else:
pre = " ["
# format string after bar
post = self._postFmt.format(self._current)
# current total length of a line in the terminal
lineLength = terminalWidth()
# length available for messages
availLength = lineLength - len(pre) - len(post) - self._bar.length
if availLength < 10:
# not enough space to display everything, only show message
out = self._message[:lineLength-4]+" ["+ELLIPSIS+"]"
else:
# add spaces after message or abbreviate message depending on availLength
spaceLength = availLength - len(self._message)
msg = self._message+" "*spaceLength \
if spaceLength >= 0 \
else self._message[:availLength-1]+ELLIPSIS
# construct the full output string
out = msg+pre+self._bar.construct(self._current, self._target)+post
sys.stderr.write(out)
sys.stderr.flush()
def write(self, msg):
r"""!
Write a message to the terminal.
Clears the current progress bar on screen, writes the message,
appends a newline if needed and redraws the bar.
\attention Use exclusively this function to write to the terminal
while a progress bar is being displayed.
"""
self.clear()
if not msg.endswith("\n"):
sys.stderr.write(msg+"\n")
else:
sys.stderr.write(msg)
self.draw()
def finalize(self):
"""!Remove bar from screen and print a message showing the run time."""
self.clear()
sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
"at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
class FileProgressbar(Progressbar):
r"""!
A progress indicator that writes individual update messages.
This is not really a progress 'bar' as it only prints simple messages
indicating progress, not an animated bar.
Is still writes to `sys.stderr` though, not directly to a file.
Use this class if `sys.stderr` is not connected to a terminal.
\warning Even though normal output does not interfere with this progress bar,
it is still better to use FileProgressbar.write() instead of
plain `print()` for uniformity with TerminalProgressbar.
When set up correctly (via `setupLogging()`), loggers handle output properly.
"""
def __init__(self, target, message="", updateRate=1):
r"""!
Construct a new progress bar.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
May be `None` in which case the bar indicates only
general progress without showing how far away the goal is.
\param message A string that is displayed in front of the actual bar
and realted information.
\param updateRate The bar is only redrawn after updateRate number of steps.
"""
super().__init__(message)
self._target = target
self._updateRate = updateRate
self._current = 0
self._lastUpdated = -updateRate
# ETA's __init__ makes sure that target > 0
self._eta = ETA(target) if target else None
# format string for a counter
if target:
targetStr = f"{target:d}"
self._counterFmt = " ({:"+str(len(targetStr))+"d}/"+targetStr+") "
else:
self._counterFmt = " ({:3d}/?)"
def advance(self, amount=1):
"""!Advance the bar by a given amount, redraws the bar."""
self._current += amount
if self._current - self._updateRate >= self._lastUpdated:
self.redraw()
# go to nearest multiple of updateRate less than current
self._lastUpdated = (self._current // self._updateRate)*self._updateRate
def clear(self):
"""!Do nothing, cannot easily erase content from files."""
def redraw(self):
"""!Just call draw()."""
self.draw()
def draw(self):
"""!Print progress information."""
# format progress indication string
if self._eta:
eta = self._eta(self._current)
progStr = " ETA: " \
+ (time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??")
else:
progStr = ""
# format string after bar
progStr += self._counterFmt.format(self._current)
sys.stderr.write(self._message+progStr+"\n")
sys.stderr.flush()
def write(self, msg):
r"""!
Write out a message.
Just redirects to sys.stderr.
\attention Use exclusively this function to write to the terminal
while a progress bar is being displayed.
"""
sys.stderr.write(msg)
def finalize(self):
"""!Print a message showing the run time."""
sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
"at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
def makeProgressbar(target, message="", updateRate=1):
r"""!
Construct a Progressbar.
Selects either TerminalProgressbar or FilePRogressbar
depending on whether `sys.stderr` is connected to a terminal
or not.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
\param message String to display with the progressbar.
\param updateRate The bar is only redrawn after updateRate number of steps.
Only used when writing to file.
\returns A new Progressbar instance.
"""
if stderrConnectedToTerm():
return TerminalProgressbar(target, message)
return FileProgressbar(target, message, updateRate)
@contextlib.contextmanager
def trackProgress(target, message="", updateRate=1):
r"""!
A context manager to track progress of an operation via a progress bar.
Sets up and returns a new progress bar.
The caller needs to advance that bar themselves.
\param target Target number of steps to track.
Can be `None` in which case the bar only indicates that something,
happens, not how far away the goal is.
\param message Message to display in front of the progress bar.
\returns A newly constructed instance of Progressbar.
\throws RuntimeError is a progress bar is already active.
"""
global _activeBar
if _activeBar is not None:
logging.getLogger(__name__).error("Cannot construct a new progress bar, "
"another one is already active.")
raise RuntimeError("A progress bar is already active.")
try:
_activeBar = makeProgressbar(target, message, updateRate)
yield _activeBar
_activeBar.finalize() # success => clean up
except:
# failure => leave bar visible and advance a line
sys.stderr.write("\n")
raise
finally:
# in any case the bar is now done
_activeBar = None
def progressRange(start, stop=None, step=1, message="", updateRate=1):
r"""!
Like built in `range()` but indicates progress using a Progressbar.
Parameters `start`, `stop`, and `step` behave the same way as for
the built in `range()` generator.
`message` is displayed in front of the progress bar.
"""
# mimic behavior of built in range
if stop is None:
stop = start
start = 0
with trackProgress(stop-start, message, updateRate) as pbar:
for cur in range(start, stop, step):
yield cur
# advance only up to stop
# If stop-start is not a multiple of step, advance(step) would overshoot.
pbar.advance(min(step, stop-cur))
########################################################################
# Logging
#
class ProgressStream:
"""!
A barbones output stream that writes to `sys.stderr` or
directs the output through a progress bar if one is active.
"""
def write(self, msg):
"""!Write msg to `sys.stde | ""
if _activeBar is not None:
# The c++ logger sends spurious empty lines,
# just gobble them up.
if msg.strip():
_activeBar.write(msg)
else:
sys.stderr.write(msg)
class ColorFormatter(logging.Formatter):
"""!
A logging formatter that uses colors for loglevel and logger name.
Colors are encoded using ANSI escape sequences. If they are not supported,
the output shows extra characters. You should therefore pick a different
formatter if you write to file.
"""
## Colorized level names.
LEVELNAMES = {
logging.DEBUG: "[94mDEBUG[0m",
logging.INFO: "INFO",
logging.WARNING: "[33mWARNING[0m",
logging.ERROR: "[31mERROR[0m",
logging.CRITICAL: "[91mCRITICAL[0m",
}
def format(self, record):
"!Format a record using colors."
# Hack the colors into the record itself and let super do the heavy lifting.
record.levelname = self.LEVELNAMES[record.levelno]
record.name = f"[1m{record.name}[0m"
return super().format(record)
def _suppressGoogleLogWarning():
"""
Suppress warning emitted by absl.logging
'WARNING: Logging before flag parsing goes to stderr.'
Does nothing if abseil-py is not installed.
"""
try:
# Tensorflow uses Google's abseil-py library, which uses a Google-specific
# wrapper for logging. That wrapper will write a warning to sys.stderr if
# the Google command-line flags library has not been initialized.
#
# https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825
#
# We don't want this here because we have our own logging setup.
import absl.logging
# https://github.com/abseil/abseil-py/issues/99
logging.root.removeHandler(absl.logging._absl_handler)
# https://github.com/abseil/abseil-py/issues/102
absl.logging._warn_preinit_stderr = False
except Exception:
pass
def setupLogging(logfile=None, verbosity=0):
r"""!
Set up Python's logging framework.
The root logger is set up to output to terminal and file.
The former shows colored output if `sys.stderr` is connected to a terminal
and is aware of Progressbar.
\param logfile Write log to this file.
If `None`, no file output is performed.
\param verbosity Set logging level.
The minimum level for each handler is
`verbosity` | terminal | file
----------- | -------- | ----
0 | WARNING | INFO
1 | INFO | INFO
2 | DEBUG | DEBUG
\throws RuntimeError if this function is called more than once.
It is safe to discard this exception,
logging will still be set up properly.
"""
if setupLogging.isSetUp:
logging.getLogger(__name__).error("Called setupLogging a second time."
"This function must be called *exactly* once.")
raise RuntimeError("Logging already set up")
_suppressGoogleLogWarning()
if verbosity > 2:
# can't be any noisier than that
verbosity = 2
# need at least this level so all messages get out
minLoglevel = logging.DEBUG if verbosity == 2 else logging.INFO
# No need for keeping track of threads in Python.
# In C++, all bets are off.
logging.logThreads = 0
# configure the root logger
logger = logging.getLogger("")
logger.setLevel(minLoglevel)
if logfile:
# output to file at least at level INFO and w/o colors
fh = logging.FileHandler(logfile, "w")
fh.setLevel(minLoglevel)
fh.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%Y-%m-%d %H:%M:%S"))
logger.addHandler(fh)
# and output to STDERR based on verbosity and possibly w/ colors
ch = logging.StreamHandler(stream=ProgressStream())
ch.setLevel((logging.WARNING, logging.INFO, logging.DEBUG)[verbosity])
if stderrConnectedToTerm():
ch.setFormatter(ColorFormatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
else:
ch.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
logger.addHandler(ch)
# done => Never run this code again!
setupLogging.isSetUp = True
setupLogging.isSetUp = False
########################################################################
# Argument parsing
#
def makeDefaultParser(defaultLog="none", **kwargs):
r"""!
Construct and return a new argument parser with the default arguments.
See isle.cli.addDefaultArgs().
\param defaultLog Default log file in case user does not supply --log argument.
\param **kwargs Passed to constructor of ArgumentParser.
"""
return addDefaultArgs(argparse.ArgumentParser(**kwargs), defaultLog)
def addDefaultArgs(parser, defaultLog="none"):
"""!Add default arguments common to all commands."""
parser.add_argument("--version", action="version",
version=f"Isle {isle.__version__}")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Make output more verbose, stacks.")
parser.add_argument("--log", default=defaultLog,
help="Specify log file name. Set to none to not write log file.")
return parser
def addContinueArgs(parser):
"""!Add arguments for continuation run to parser."""
parser.add_argument("infile", help="Input file.", type=Path)
parser.add_argument("-o", "--output", help="Output file",
type=Path, dest="outfile")
parser.add_argument("-i", "--initial", type=int, default=-1,
help="Initial checkpoint for HMC")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing output files")
parser.add_argument("-s", "--save-freq", type=int, default=None,
help="Save configurations every s trajectories, "
"computed from infile by default")
parser.add_argument("-c", "--checkpoint-freq", type=int, default=None,
help="Save checkpoints every c trajectories, "
"computed from infile by default")
requiredGrp = parser.add_argument_group("required named arguments")
requiredGrp.add_argument("-n", "--ntrajectories", type=int, required=True,
help="Number of trajectories to produce")
return parser
def addMeasArgs(parser):
"""!Add arguments for measurements to parser."""
parser.add_argument("infile", help="Input file", type=Path)
parser.add_argument("-o", "--output", help="Output file", type=Path, dest="outfile")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing output file.")
return parser
def addShowArgs(parser):
"""!Add arguments for reporting to parser."""
reporters = ["overview", "lattice", "correlator", "tuning"]
class _ReportAction(argparse.Action):
"""!custom action to parse reporters."""
def __call__(self, parser, namespace, values, option_string=None):
if "all" in values:
setattr(namespace, self.dest, reporters)
else:
setattr(namespace, self.dest, values.split(","))
parser.add_argument("input", help="Input file", nargs="+", type=Path)
parser.add_argument("-r", "--report", action=_ReportAction, metavar="", default=["overview"],
help="Comma separated list of reporters to use. Allowed values are ["
+",".join(reporters)+",all] Defaults to overview.")
return parser
def _makeParser(argParser, **kwargs):
"""!Make an argument parser from given command name and keyword arguments."""
cmdArgMap = {"continue": addContinueArgs,
"meas": addMeasArgs,
"show": addShowArgs,
"default": lambda parser: parser}
defaultLog = {"continue": "isle.hmc.log",
"meas": "isle.meas.log",
"show": "none",
"default": "isle.log"}
if not argParser in cmdArgMap:
# this is pre logging setup => do it the ugly way
print(f"Error: requested argParser name not supported: {argParser}")
raise ValueError("Error: requested argParser name not supported")
return cmdArgMap[argParser](makeDefaultParser(defaultLog=defaultLog[argParser], **kwargs))
########################################################################
# The one function to control all the rest.
#
def init(argParser="default", defaultLog=None, verbosity=0, **kwargs):
r"""!
Initialize command line interface.
This function must be called before any I/O as it sets up the logging framework.
\param argParser Command line argument parser. Can be
- `argparse.ArgumentParser`: Use this parser as is.
- `str`: Construct a parser based on this command name.
See `add*Args` functions.
- `None`: Don't parse any command line arguments.
Log file and verbosity are set to the values provided in
corresponding function parameters.
\param defaultLog Log file to use if `argParser is None`.
\param verbosity Output verbisity level to use if `argParser is None`.
\param **kwargs Passed to isle.cli.makeDefaultParser() if `argParser` is a string.
\returns Parsed arguments.
"""
if argParser is not None:
if isinstance(argParser, str):
# construct new parser based on command name
args = _makeParser(argParser, **kwargs).parse_args()
else:
# use provided parser
args = argParser.parse_args()
defaultLog = None if not hasattr(args, "log") or args.log.lower() == "none" else args.log
verbosity = args.verbose if hasattr(args, "verbose") else 0
else:
# don't parse anything, use default values
args = None
setupLogging(defaultLog, verbosity=verbosity)
return args
| rr`." | identifier_name |
cli.py | """! \file
Utilities for command line interfaces.
The default interface can be set up using isle.cli.init().
More control is available through the lower level functions.
"""
from abc import ABCMeta, abstractmethod
import argparse
import contextlib
import logging
from pathlib import Path
import random
import shutil
import sys
import time
import isle
# the active progress bar
# Yes, yes a global variable but we need singletons here!
_activeBar = None
## Unicode ellipsis string.
ELLIPSIS = "…"
########################################################################
# General stuff
#
def terminalWidth():
"""!
Return the current number of columns of the terminal.
\note This does not give the proper size if `sys.stdout` is
redirected to a file.
"""
return shutil.get_terminal_size().columns
def stderrConnectedToTerm():
"""!Return True if stderr is connected to a terminal, False otherwise."""
return sys.stderr.isatty()
########################################################################
# Progress bars
#
class ETA:
    """!
    Predict the completion time of an iterative process.

    A linear model is fitted through two points: the time recorded after
    the first completed iteration and the time at which __call__ is
    invoked.  The prediction is therefore reliable when iterations take
    roughly equal time and noisy otherwise.

    The reference time is deliberately taken *after* the first iteration
    (the first __call__ with current > 0) because iteration zero often
    includes one-off setup work that would bias the estimate.

    With the reference iteration xi started at time ti and the current
    iteration xc observed at time tc, the predicted finishing time is
        tf = (tc - ti) / (xc - xi) * (xf - xi) + ti
    for the target iteration xf.
    """

    def __init__(self, target):
        """!Set the target iteration number; raises ValueError unless target > 1."""
        if target <= 1:
            raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
        self.targetIteration = target
        self._ti = None  # reference time (set after the first iteration)
        self._xi = None  # iteration at which the reference time was taken

    def __call__(self, current):
        r"""!
        Predict the time of arrival given a current iteration.
        \param current Iteration number the loop is currently at.
        \returns - Predicted arrival time in seconds since the epoch.
                 - `None` while no reference time exists or while `current`
                   has not moved past the reference iteration.
        """
        if self._ti is None:
            # Record the reference point once the first iteration is done.
            if current > 0:
                self._ti = time.time()
                self._xi = current
            return None
        if current <= self._xi:
            # Repeated calls within the reference iteration give no slope.
            return None
        # Linear extrapolation from the reference point to the target.
        now = time.time()
        slope = (now - self._ti) / (current - self._xi)
        return slope * (self.targetIteration - self._xi) + self._ti

    def reset(self, target):
        """!Forget all measurements and set a new target; raises ValueError unless target > 1."""
        if target <= 1:
            raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
        self.targetIteration = target
        self._ti = None  # reference time (set after the first iteration)
        self._xi = None  # iteration at which the reference time was taken
class Progressbar(metaclass=ABCMeta):
    r"""!
    Common interface for all progress bars.

    \warning While a bar is active, any and all terminal output has to go
             through its write() method; anything written directly may
             corrupt or erase the bar on the next redraw.
             Loggers configured through `setupLogging()` do this correctly.
    """

    def __init__(self, message=""):
        r"""!
        Store the display message and remember the start time.
        \warning Never have more than one active instance at the same time,
                 they interfere with each other.
        \param message A string that is displayed in front of the actual bar
                       and related information.
        """
        self._message = message
        # Remembered so finalize() can report the total run time.
        self._startTime = time.time()

    @abstractmethod
    def advance(self, amount=1):
        """!Advance the bar by a given amount, redraws the bar."""
        raise NotImplementedError()

    @abstractmethod
    def clear(self):
        """!Clear the current line of output."""
        raise NotImplementedError()

    @abstractmethod
    def redraw(self):
        """!Clear the current bar and draw a new one."""
        raise NotImplementedError()

    @abstractmethod
    def draw(self):
        """!Draw the bar."""
        raise NotImplementedError()

    def write(self, msg):
        r"""!
        Forward a message to the terminal.
        \attention Use exclusively this function to write to the terminal
                   while a progress bar is being displayed.
        """
        sys.stderr.write(msg)

    def finalize(self):
        """!Remove bar from screen and show a message showing the run time."""
        elapsed = time.time() - self._startTime
        stamp = time.strftime("%H:%M:%S", time.localtime())
        sys.stderr.write(f"{self._message} finished after {elapsed:.1f}s at {stamp} \n")
class TerminalProgressbar(Progressbar):
    r"""!
    A progress bar that is shown in the terminal via sys.stderr.
    Needs to modify the current line of output in order to animate the progress bar.
    This is only possible when the output is indeed connected to a terminal.
    If sys.stderr is piped into a file, this class cannot operate properly.
    \warning Any and all terminal output must be made through TerminalProgressbar.write()
             while a progress bar is active!
             Otherwise it will interfere with the progress bar and might get erased
             when the bar is redrawn.
             When set up correctly (via `setupLogging()`), loggers handle output properly.
    """
    # Escape sequence to clear the current line right of the cursor.
    # NOTE(review): ANSI sequences normally begin with the ESC byte (\x1b)
    # before '[' — confirm the escape character survived in these literals.
    _CLEAR = "[K"
    # Escape sequence to move the cursor to the beginning of the current line.
    _FRONT = "[G"
    class _FillingBar:
        """!A bar that fills up over time approaching a target."""
        def __init__(self, length, filledChar, emptyChar):
            # Total width of the bar in characters (excludes ETA and counter).
            self.length = length
            self._filledChar = filledChar
            self._emptyChar = emptyChar
        def construct(self, current, target):
            """!Construct a string representing the bar from a current and target fill status."""
            # Integer truncation: a cell is drawn filled only once fully reached.
            nfilled = int(current / target * self.length)
            return self._filledChar*nfilled + self._emptyChar*(self.length-nfilled)
    class _OscillatingBar:
        """!A 'bar' that oscillates randomly for cases where no target is known."""
        # element [i][j] transitions from height i to height j
        _PIECES = [["⠤", "⠴", "⠼"],
                   ["⠲", "⠒", "⠚"],
                   ["⠹", "⠙", "⠉"]]
        # extended 4-dot pieces, looks a bit odd because the lower most dots are very low
        # _PIECES = [
        #     ["⣀", "⣠", "⣰", "⣸"],
        #     ["⢤", "⠤", "⠴", "⠼"],
        #     ["⢲", "⠲", "⠒", "⠚"],
        #     ["⢹", "⠹", "⠙", "⠉"]
        # ]
        def __init__(self, length):
            self.length = length
            self._rng = random.Random()
            # Start at middle height so the first transitions look smooth.
            self._currentHeight = 1
            self._barStr = self._PIECES[self._currentHeight][self._currentHeight]*length
        def construct(self, _current, _target):
            """!Construct a string representing the bar, arguments are ignored."""
            h = self._rng.randint(0, 2) # new height
            # shift by one and add new element
            self._barStr = self._barStr[1:]+self._PIECES[self._currentHeight][h]
            self._currentHeight = h
            return self._barStr
    def __init__(self, target, message="", barLength=40,
                 barChar="#", emptyChar="-"):
        r"""!
        Construct a new progress bar.
        \warning Never have more than one active instance at the same time,
                 they interfere with each other.
        \param target Targeted number of iterations.
                      May be `None` in which case the bar indicates only
                      general progress without showing how far away the goal is.
        \param message A string that is displayed in front of the actual bar
                       and related information.
        \param barLength Number of characters the bar itself occupies in the terminal.
                         Does not include ETA and iteration counter.
        \param barChar Single character to use for the filled portion of the bar.
        \param emptyChar Single character to use for the not yet filled portion of the bar.
        """
        super().__init__(message)
        self._target = target
        self._current = 0
        # ETA's __init__ makes sure that target > 0
        self._eta = ETA(target) if target else None
        # With a known target the bar fills up, otherwise it oscillates.
        self._bar = self._FillingBar(barLength, barChar, emptyChar) \
            if target \
            else self._OscillatingBar(barLength)
        # format string for text after bar; counter width matches the target's digits
        if target:
            targetStr = f"{target:d}"
            self._postFmt = "] ({:"+str(len(targetStr))+"d}/"+targetStr+") "
        else:
            self._postFmt = "] ({:3d}/?)"
    def advance(self, amount=1):
        """!Advance the bar by a given amount, redraws the bar."""
        self._current += amount
        self.redraw()
    def clear(self):
        """!Clear the current line of output."""
        sys.stderr.write(self._FRONT+self._CLEAR)
    def redraw(self):
        """!Clear the current bar and draw a new one."""
        # enough to go to front, don't need to clear the line
        # (draw() writes a full-width line over the old one)
        sys.stderr.write(self._FRONT)
        self.draw()
    def draw(self):
        """!Draw the bar into the terminal at the current cursor position."""
        # format string before bar
        if self._eta:
            eta = self._eta(self._current)
            pre = " ETA: " \
                + (time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??") \
                + " ["
        else:
            pre = " ["
        # format string after bar
        post = self._postFmt.format(self._current)
        # current total length of a line in the terminal
        lineLength = terminalWidth()
        # length available for messages
        availLength = lineLength - len(pre) - len(post) - self._bar.length
        if availLength < 10:
            # not enough space to display everything, only show message
            out = self._message[:lineLength-4]+" ["+ELLIPSIS+"]"
        else:
            # add spaces after message or abbreviate message depending on availLength
            spaceLength = availLength - len(self._message)
            msg = self._message+" "*spaceLength \
                if spaceLength >= 0 \
                else self._message[:availLength-1]+ELLIPSIS
            # construct the full output string
            out = msg+pre+self._bar.construct(self._current, self._target)+post
        sys.stderr.write(out)
        sys.stderr.flush()
    def write(self, msg):
        r"""!
        Write a message to the terminal.
        Clears the current progress bar on screen, writes the message,
        appends a newline if needed and redraws the bar.
        \attention Use exclusively this function to write to the terminal
                   while a progress bar is being displayed.
        """
        self.clear()
        if not msg.endswith("\n"):
            sys.stderr.write(msg+"\n")
        else:
            sys.stderr.write(msg)
        self.draw()
    def finalize(self):
        """!Remove bar from screen and print a message showing the run time."""
        self.clear()
        sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
                         "at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
class FileProgressbar(Progressbar):
    r"""!
    Progress indicator suited for non-terminal output.

    Instead of animating a bar, each update prints a complete status line.
    Output still goes to `sys.stderr`; pick this class when stderr is
    redirected to a file rather than connected to a terminal.

    \warning Plain output does not break this indicator, but for uniformity
             with TerminalProgressbar prefer FileProgressbar.write() over
             plain `print()`.
             When set up correctly (via `setupLogging()`), loggers handle output properly.
    """

    def __init__(self, target, message="", updateRate=1):
        r"""!
        Construct a new progress indicator.
        \warning Never have more than one active instance at the same time,
                 they interfere with each other.
        \param target Targeted number of iterations.
                      May be `None` in which case only general progress is
                      reported without showing how far away the goal is.
        \param message A string printed in front of the progress information.
        \param updateRate A status line is printed only every updateRate steps.
        """
        super().__init__(message)
        self._target = target
        self._updateRate = updateRate
        self._current = 0
        # Ensures the very first advance() triggers a redraw.
        self._lastUpdated = -updateRate
        # ETA's __init__ makes sure that target > 0
        self._eta = ETA(target) if target else None
        # Pre-build the counter format; field width matches the target's digits.
        if target:
            width = len(f"{target:d}")
            self._counterFmt = " ({:" + str(width) + "d}/" + f"{target:d}" + ") "
        else:
            self._counterFmt = " ({:3d}/?)"

    def advance(self, amount=1):
        """!Advance the bar by a given amount, redraws the bar."""
        self._current += amount
        if self._current - self._updateRate >= self._lastUpdated:
            self.redraw()
            # Snap to the largest multiple of updateRate not above current.
            self._lastUpdated = self._current // self._updateRate * self._updateRate

    def clear(self):
        """!Do nothing, cannot easily erase content from files."""

    def redraw(self):
        """!Alias for draw()."""
        self.draw()

    def draw(self):
        """!Print one complete progress line."""
        if self._eta:
            eta = self._eta(self._current)
            stamp = time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??"
            progStr = " ETA: " + stamp
        else:
            progStr = ""
        progStr += self._counterFmt.format(self._current)
        sys.stderr.write(self._message + progStr + "\n")
        sys.stderr.flush()

    def write(self, msg):
        r"""!
        Write out a message; simply forwards to sys.stderr.
        \attention Use exclusively this function to write to the terminal
                   while a progress bar is being displayed.
        """
        sys.stderr.write(msg)

    def finalize(self):
        """!Print a message showing the run time."""
        elapsed = time.time() - self._startTime
        stamp = time.strftime("%H:%M:%S", time.localtime())
        sys.stderr.write(f"{self._message} finished after {elapsed:.1f}s at {stamp} \n")
def makeProgressbar(target, message="", updateRate=1):
    r"""!
    Create the progress bar best suited for the current environment.
    Returns a TerminalProgressbar when `sys.stderr` is attached to a
    terminal and a FileProgressbar otherwise.
    \warning Never have more than one active instance at the same time,
             they interfere with each other.
    \param target Targeted number of iterations.
    \param message String to display with the progressbar.
    \param updateRate The bar is only redrawn after updateRate number of steps.
                      Only used when writing to file.
    \returns A new Progressbar instance.
    """
    if not stderrConnectedToTerm():
        return FileProgressbar(target, message, updateRate)
    return TerminalProgressbar(target, message)
@contextlib.contextmanager
def trackProgress(target, message="", updateRate=1):
    r"""!
    A context manager to track progress of an operation via a progress bar.
    Sets up and returns a new progress bar.
    The caller needs to advance that bar themselves.
    \param target Target number of steps to track.
                  Can be `None` in which case the bar only indicates that
                  something happens, not how far away the goal is.
    \param message Message to display in front of the progress bar.
    \param updateRate The bar is only redrawn after updateRate number of steps.
                      Only used when writing to file.
    \returns A newly constructed instance of Progressbar.
    \throws RuntimeError if a progress bar is already active.
    """
    global _activeBar
    if _activeBar is not None:
        logging.getLogger(__name__).error("Cannot construct a new progress bar, "
                                          "another one is already active.")
        raise RuntimeError("A progress bar is already active.")
    try:
        _activeBar = makeProgressbar(target, message, updateRate)
        yield _activeBar
        _activeBar.finalize()  # success => clean up
    except BaseException:
        # Failure => keep the bar visible and advance a line so subsequent
        # output (e.g. the traceback) does not overwrite it.
        sys.stderr.write("\n")
        raise
    finally:
        # In any case the bar is now done.
        _activeBar = None
def progressRange(start, stop=None, step=1, message="", updateRate=1):
    r"""!
    Drop-in replacement for the built in `range()` that shows a Progressbar.
    Parameters `start`, `stop`, and `step` behave exactly as in the
    built in `range()` generator.
    `message` is displayed in front of the progress bar.
    """
    # Single-argument form: progressRange(stop).
    if stop is None:
        start, stop = 0, start
    with trackProgress(stop - start, message, updateRate) as pbar:
        for cur in range(start, stop, step):
            yield cur
            # Never advance past stop; with a stride that does not divide
            # the interval evenly, advance(step) would overshoot.
            pbar.advance(min(step, stop - cur))
########################################################################
# Logging
#
class ProgressStream:
    """!
    A bare-bones output stream that writes to `sys.stderr` or
    directs the output through a progress bar if one is active.
    Intended as the `stream` argument of `logging.StreamHandler`.
    """
    def write(self, msg):
        """!Write msg to the active progress bar or to `sys.stderr`."""
        if _activeBar is not None:
            # The c++ logger sends spurious empty lines,
            # just gobble them up.
            if msg.strip():
                _activeBar.write(msg)
        else:
            sys.stderr.write(msg)
    def flush(self):
        """!Flush `sys.stderr`.
        Completes the file-like interface expected by consumers of a stream
        (e.g. logging handlers flush their stream after emitting)."""
        sys.stderr.flush()
class ColorFormatter(logging.Formatter):
    """!
    A logging formatter that uses colors for loglevel and logger name.
    Colors are encoded using ANSI escape sequences. If they are not supported,
    the output shows extra characters. You should therefore pick a different
    formatter if you write to file.
    """
    ## Colorized level names.
    # NOTE(review): each colored entry should begin with the ESC byte (\x1b)
    # before '[' — confirm the escape characters were not lost in transit.
    LEVELNAMES = {
        logging.DEBUG: "[94mDEBUG[0m",
        logging.INFO: "INFO",
        logging.WARNING: "[33mWARNING[0m",
        logging.ERROR: "[31mERROR[0m",
        logging.CRITICAL: "[91mCRITICAL[0m",
    }
    def format(self, record):
        """!Format a record using colors.
        Note: this mutates the record's levelname and name in place."""
        # Hack the colors into the record itself and let super do the heavy lifting.
        record.levelname = self.LEVELNAMES[record.levelno]
        record.name = f"[1m{record.name}[0m"
        return super().format(record)
def _suppressGoogleLogWarning():
"""
Suppress warning emitted by absl.logging
'WARNING: Logging before flag parsing goes to stderr.'
Does nothing if abseil-py is not installed.
"""
try:
# Tensorflow uses Google's abseil-py library, which uses a Google-specific
# wrapper for logging. That wrapper will write a warning to sys.stderr if
# the Google command-line flags library has not been initialized.
#
# https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825
#
# We don't want this here because we have our own logging setup.
import absl.logging
# https://github.com/abseil/abseil-py/issues/99
logging.root.removeHandler(absl.logging._absl_handler)
# https://github.com/abseil/abseil-py/issues/102
absl.logging._warn_preinit_stderr = False
except Exception:
pass
def setupLogging(logfile=None, verbosity=0):
r"""!
Set up Python's logging framework.
The root logger is set up to output to terminal and file.
The former shows colored output if `sys.stderr` is connected to a terminal
and is aware of Progressbar.
\param logfile Write log to this file.
If `None`, no file output is performed.
\param verbosity Set logging level.
The minimum level for each handler is
`verbosity` | terminal | file
----------- | -------- | ----
0 | WARNING | INFO
1 | INFO | INFO
2 | DEBUG | DEBUG
\throws RuntimeError if this function is called more than once.
It is safe to discard this exception,
logging will still be set up properly.
"""
if setupLogging.isSetUp:
logging.getLogger(__name__).error("Called setupLoggi | > 2:
# can't be any noisier than that
verbosity = 2
# need at least this level so all messages get out
minLoglevel = logging.DEBUG if verbosity == 2 else logging.INFO
# No need for keeping track of threads in Python.
# In C++, all bets are off.
logging.logThreads = 0
# configure the root logger
logger = logging.getLogger("")
logger.setLevel(minLoglevel)
if logfile:
# output to file at least at level INFO and w/o colors
fh = logging.FileHandler(logfile, "w")
fh.setLevel(minLoglevel)
fh.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%Y-%m-%d %H:%M:%S"))
logger.addHandler(fh)
# and output to STDERR based on verbosity and possibly w/ colors
ch = logging.StreamHandler(stream=ProgressStream())
ch.setLevel((logging.WARNING, logging.INFO, logging.DEBUG)[verbosity])
if stderrConnectedToTerm():
ch.setFormatter(ColorFormatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
else:
ch.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
logger.addHandler(ch)
# done => Never run this code again!
setupLogging.isSetUp = True
setupLogging.isSetUp = False
########################################################################
# Argument parsing
#
def makeDefaultParser(defaultLog="none", **kwargs):
    r"""!
    Construct and return a new argument parser with the default arguments.
    See isle.cli.addDefaultArgs().
    \param defaultLog Default log file in case user does not supply --log argument.
    \param **kwargs Passed to constructor of ArgumentParser.
    """
    parser = argparse.ArgumentParser(**kwargs)
    return addDefaultArgs(parser, defaultLog)
def addDefaultArgs(parser, defaultLog="none"):
    """!Add default arguments common to all commands and return the parser."""
    add = parser.add_argument
    add("--version", action="version",
        version=f"Isle {isle.__version__}")
    add("-v", "--verbose", action="count", default=0,
        help="Make output more verbose, stacks.")
    add("--log", default=defaultLog,
        help="Specify log file name. Set to none to not write log file.")
    return parser
def addContinueArgs(parser):
    """!Register the command line arguments of a continuation run; returns the parser."""
    add = parser.add_argument
    add("infile", help="Input file.", type=Path)
    add("-o", "--output", help="Output file",
        type=Path, dest="outfile")
    add("-i", "--initial", type=int, default=-1,
        help="Initial checkpoint for HMC")
    add("--overwrite", action="store_true",
        help="Overwrite existing output files")
    add("-s", "--save-freq", type=int, default=None,
        help="Save configurations every s trajectories, computed from infile by default")
    add("-c", "--checkpoint-freq", type=int, default=None,
        help="Save checkpoints every c trajectories, computed from infile by default")
    # argparse lists options as optional by default; put the mandatory one
    # in its own group so the help output is not misleading.
    required = parser.add_argument_group("required named arguments")
    required.add_argument("-n", "--ntrajectories", type=int, required=True,
                          help="Number of trajectories to produce")
    return parser
def addMeasArgs(parser):
    """!Register the command line arguments used by measurements; returns the parser."""
    add = parser.add_argument
    add("infile", help="Input file", type=Path)
    add("-o", "--output", help="Output file", type=Path, dest="outfile")
    add("--overwrite", action="store_true",
        help="Overwrite existing output file.")
    return parser
def addShowArgs(parser):
    """!Add arguments for reporting to parser; returns the parser.
    The -r/--report option accepts a comma separated list of reporter names,
    or 'all' to select every reporter.
    """
    reporters = ["overview", "lattice", "correlator", "tuning"]
    class _ReportAction(argparse.Action):
        """!Custom action to parse the comma separated list of reporters."""
        def __call__(self, parser, namespace, values, option_string=None):
            # Split first: a plain substring test on the raw string would
            # wrongly treat any reporter name that merely contains 'all'
            # (e.g. 'overall') as the keyword 'all'.
            selected = values.split(",")
            if "all" in selected:
                setattr(namespace, self.dest, reporters)
            else:
                setattr(namespace, self.dest, selected)
    parser.add_argument("input", help="Input file", nargs="+", type=Path)
    parser.add_argument("-r", "--report", action=_ReportAction, metavar="", default=["overview"],
                        help="Comma separated list of reporters to use. Allowed values are ["
                             +",".join(reporters)+",all] Defaults to overview.")
    return parser
def _makeParser(argParser, **kwargs):
    """!Make an argument parser from given command name and keyword arguments.
    \param argParser Name of the command: 'continue', 'meas', 'show', or 'default'.
    \param **kwargs Forwarded to makeDefaultParser().
    \throws ValueError if the command name is not supported.
    """
    cmdArgMap = {"continue": addContinueArgs,
                 "meas": addMeasArgs,
                 "show": addShowArgs,
                 "default": lambda parser: parser}
    # Default log file per command.
    defaultLog = {"continue": "isle.hmc.log",
                  "meas": "isle.meas.log",
                  "show": "none",
                  "default": "isle.log"}
    if argParser not in cmdArgMap:
        # this is pre logging setup => do it the ugly way
        print(f"Error: requested argParser name not supported: {argParser}")
        raise ValueError("Error: requested argParser name not supported")
    return cmdArgMap[argParser](makeDefaultParser(defaultLog=defaultLog[argParser], **kwargs))
########################################################################
# The one function to control all the rest.
#
def init(argParser="default", defaultLog=None, verbosity=0, **kwargs):
    r"""!
    Initialize command line interface.
    This function must be called before any I/O as it sets up the logging framework.
    \param argParser Command line argument parser. Can be
                     - `argparse.ArgumentParser`: Use this parser as is.
                     - `str`: Construct a parser based on this command name.
                              See `add*Args` functions.
                     - `None`: Don't parse any command line arguments.
                               Log file and verbosity are taken from the
                               corresponding function parameters.
    \param defaultLog Log file to use if `argParser is None`.
    \param verbosity Output verbosity level to use if `argParser is None`.
    \param **kwargs Passed to isle.cli.makeDefaultParser() if `argParser` is a string.
    \returns Parsed arguments (`None` if `argParser is None`).
    """
    args = None
    if argParser is not None:
        if isinstance(argParser, str):
            # Build a parser from the command name (see add*Args functions).
            parser = _makeParser(argParser, **kwargs)
        else:
            # Caller supplied a ready-made parser.
            parser = argParser
        args = parser.parse_args()
        # "none" (case-insensitive) means: do not write a log file.
        logName = args.log if hasattr(args, "log") else None
        defaultLog = None if logName is None or logName.lower() == "none" else logName
        verbosity = args.verbose if hasattr(args, "verbose") else 0
    setupLogging(defaultLog, verbosity=verbosity)
    return args
| ng a second time."
"This function must be called *exactly* once.")
raise RuntimeError("Logging already set up")
_suppressGoogleLogWarning()
if verbosity | conditional_block |
cli.py | """! \file
Utilities for command line interfaces.
The default interface can be set up using isle.cli.init().
More control is available through the lower level functions.
"""
from abc import ABCMeta, abstractmethod
import argparse
import contextlib
import logging
from pathlib import Path
import random
import shutil
import sys
import time
import isle
# the active progress bar
# Yes, yes a global variable but we need singletons here!
_activeBar = None
## Unicode ellipsis string.
ELLIPSIS = "…"
########################################################################
# General stuff
#
def terminalWidth():
    """!
    Return the current number of columns of the terminal.
    \note This does not give the proper size if `sys.stdout` is
          redirected to a file.
    """
    size = shutil.get_terminal_size()
    return size.columns
def stderrConnectedToTerm():
    """!Return True if stderr is connected to a terminal, False otherwise."""
    return bool(sys.stderr.isatty())
########################################################################
# Progress bars
#
class ETA:
    """!
    Estimate the time of arrival for iterations.
    The ETA is computed from a linear regression to the starting time
    and current time (time at execution of the __call__ method).
    This is not very stable for strongly changing durations of individual iterations
    but gives a good estimate for mostly stable durations.
    The start time is only set after the first iteration
    (calling __call__ with current > 0).
    This is because the first iteration might perform some expensive setup
    operations the following iterations do not have to repeat.
    In such a case, using the time at current=0 would introduce a bias
    to the ETA.
    Given an initial iteration xi which was started at time ti
    and a current iteration xc and current time tc
    the estimated final time (ETA) is
        tf = mc * (xf - xi) + ti
    where
        mc = (tc - ti) / (xc - xi).
    """
    def __init__(self, target):
        """!Initialize with a given target iteration number; raises ValueError unless target > 1."""
        if target <= 1:
            raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
        self.targetIteration = target
        self._ti = None # initial time
        self._xi = None # iteration when initial time was measured
    def __call__(self, current):
        r"""!
        Estimate the time of arrival given a current iteration.
        \param current Iteration number the loop is currently at.
        \returns - Estimated time of arrival in seconds since epoch.
                 - `None` if no starting time has been set yet or `current`
                   is below or equal to starting iteration.
        """
        # can't estimate time yet
        if self._ti is None:
            # initial time is time after first iteration or later
            if current > 0:
                self._ti = time.time()
                self._xi = current
            return None
        # The method might be called multiple times during the same iteration.
        # But cannot estimate while still at initial iteration (xi).
        if current <= self._xi:
            return None
        # do linear regression: slope (tc-ti)/(xc-xi), extrapolated to the target
        tc = time.time()
        return (tc-self._ti)/(current-self._xi) * (self.targetIteration-self._xi) + self._ti
    def reset(self, target):
        """!Re-initialize with a given target iteration number; raises ValueError unless target > 1."""
        if target <= 1:
            raise ValueError(f"Target iteration of ETA must be > 1, got {target}")
        self.targetIteration = target
        self._ti = None # initial time
        self._xi = None # iteration when initial time was measured
class Progressbar(metaclass=ABCMeta):
    r"""!
    Abstract base class for progress bars.
    \warning Any and all terminal output must be made through the bar's write()
             while a progress bar is active!
             Otherwise it will interfere with the progress bar and might get erased
             when the bar is redrawn.
             When set up correctly (via `setupLogging()`), loggers handle output properly.
    """
    def __init__(self, message=""):
        r"""!
        Construct a new base progress bar.
        \warning Never have more than one active instance at the same time,
                 they interfere with each other.
        \param message A string that is displayed in front of the actual bar
                       and related information.
        """
        self._message = message
        # Remembered so finalize() can report the total run time.
        self._startTime = time.time()
    @abstractmethod
    def advance(self, amount=1):
        """!Advance the bar by a given amount, redraws the bar."""
        raise NotImplementedError()
    @abstractmethod
    def clear(self):
        """!Clear the current line of output."""
        raise NotImplementedError()
    @abstractmethod
    def redraw(self):
        """!Clear the current bar and draw a new one."""
        raise NotImplementedError()
    @abstractmethod
    def draw(self):
        """!Draw the bar."""
        raise NotImplementedError()
    def write(self, msg):
        r"""!
        Write a message to the terminal.
        \attention Use exclusively this function to write to the terminal
                   while a progress bar is being displayed.
        """
        sys.stderr.write(msg)
    def finalize(self):
        """!Remove bar from screen and show a message showing the run time."""
        sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
                         "at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
class TerminalProgressbar(Progressbar):
    r"""!
    A progress bar that is shown in the terminal via sys.stderr.
    Needs to modify the current line of output in order to animate the progress bar.
    This is only possible when the output is indeed connected to a terminal.
    If sys.stderr is piped into a file, this class cannot operate properly.
    \warning Any and all terminal output must be made through TerminalProgressbar.write()
             while a progress bar is active!
             Otherwise it will interfere with the progress bar and might get erased
             when the bar is redrawn.
             When set up correctly (via `setupLogging()`), loggers handle output properly.
    """
    # Escape sequence to clear the current line right of the cursor.
    # NOTE(review): ANSI sequences normally begin with the ESC byte (\x1b)
    # before '[' — confirm the escape character survived in these literals.
    _CLEAR = "[K"
    # Escape sequence to move the cursor to the beginning of the current line.
    _FRONT = "[G"
    class _FillingBar:
        """!A bar that fills up over time approaching a target."""
        def __init__(self, length, filledChar, emptyChar):
            # Total width of the bar in characters (excludes ETA and counter).
            self.length = length
            self._filledChar = filledChar
            self._emptyChar = emptyChar
        def construct(self, current, target):
            """!Construct a string representing the bar from a current and target fill status."""
            # Integer truncation: a cell is drawn filled only once fully reached.
            nfilled = int(current / target * self.length)
            return self._filledChar*nfilled + self._emptyChar*(self.length-nfilled)
    class _OscillatingBar:
        """!A 'bar' that oscillates randomly for cases where no target is known."""
        # element [i][j] transitions from height i to height j
        _PIECES = [["⠤", "⠴", "⠼"],
                   ["⠲", "⠒", "⠚"],
                   ["⠹", "⠙", "⠉"]]
        # extended 4-dot pieces, looks a bit odd because the lower most dots are very low
        # _PIECES = [
        #     ["⣀", "⣠", "⣰", "⣸"],
        #     ["⢤", "⠤", "⠴", "⠼"],
        #     ["⢲", "⠲", "⠒", "⠚"],
        #     ["⢹", "⠹", "⠙", "⠉"]
        # ]
        def __init__(self, length):
            self.length = length
            self._rng = random.Random()
            # Start at middle height so the first transitions look smooth.
            self._currentHeight = 1
            self._barStr = self._PIECES[self._currentHeight][self._currentHeight]*length
        def construct(self, _current, _target):
            """!Construct a string representing the bar, arguments are ignored."""
            h = self._rng.randint(0, 2) # new height
            # shift by one and add new element
            self._barStr = self._barStr[1:]+self._PIECES[self._currentHeight][h]
            self._currentHeight = h
            return self._barStr
    def __init__(self, target, message="", barLength=40,
                 barChar="#", emptyChar="-"):
        r"""!
        Construct a new progress bar.
        \warning Never have more than one active instance at the same time,
                 they interfere with each other.
        \param target Targeted number of iterations.
                      May be `None` in which case the bar indicates only
                      general progress without showing how far away the goal is.
        \param message A string that is displayed in front of the actual bar
                       and related information.
        \param barLength Number of characters the bar itself occupies in the terminal.
                         Does not include ETA and iteration counter.
        \param barChar Single character to use for the filled portion of the bar.
        \param emptyChar Single character to use for the not yet filled portion of the bar.
        """
        super().__init__(message)
        self._target = target
        self._current = 0
        # ETA's __init__ makes sure that target > 0
        self._eta = ETA(target) if target else None
        # With a known target the bar fills up, otherwise it oscillates.
        self._bar = self._FillingBar(barLength, barChar, emptyChar) \
            if target \
            else self._OscillatingBar(barLength)
        # format string for text after bar; counter width matches the target's digits
        if target:
            targetStr = f"{target:d}"
            self._postFmt = "] ({:"+str(len(targetStr))+"d}/"+targetStr+") "
        else:
            self._postFmt = "] ({:3d}/?)"
    def advance(self, amount=1):
        """!Advance the bar by a given amount, redraws the bar."""
        self._current += amount
        self.redraw()
    def clear(self):
        """!Clear the current line of output."""
        sys.stderr.write(self._FRONT+self._CLEAR)
    def redraw(self):
        """!Clear the current bar and draw a new one."""
        # enough to go to front, don't need to clear the line
        # (draw() writes a full-width line over the old one)
        sys.stderr.write(self._FRONT)
        self.draw()
    def draw(self):
        """!Draw the bar into the terminal at the current cursor position."""
        # format string before bar
        if self._eta:
            eta = self._eta(self._current)
            pre = " ETA: " \
                + (time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??") \
                + " ["
        else:
            pre = " ["
        # format string after bar
        post = self._postFmt.format(self._current)
        # current total length of a line in the terminal
        lineLength = terminalWidth()
        # length available for messages
        availLength = lineLength - len(pre) - len(post) - self._bar.length
        if availLength < 10:
            # not enough space to display everything, only show message
            out = self._message[:lineLength-4]+" ["+ELLIPSIS+"]"
        else:
            # add spaces after message or abbreviate message depending on availLength
            spaceLength = availLength - len(self._message)
            msg = self._message+" "*spaceLength \
                if spaceLength >= 0 \
                else self._message[:availLength-1]+ELLIPSIS
            # construct the full output string
            out = msg+pre+self._bar.construct(self._current, self._target)+post
        sys.stderr.write(out)
        sys.stderr.flush()
    def write(self, msg):
        r"""!
        Write a message to the terminal.
        Clears the current progress bar on screen, writes the message,
        appends a newline if needed and redraws the bar.
        \attention Use exclusively this function to write to the terminal
                   while a progress bar is being displayed.
        """
        self.clear()
        if not msg.endswith("\n"):
            sys.stderr.write(msg+"\n")
        else:
            sys.stderr.write(msg)
        self.draw()
    def finalize(self):
        """!Remove bar from screen and print a message showing the run time."""
        self.clear()
        sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
                         "at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
class FileProgressbar(Progressbar):
r"""!
A progress indicator that writes individual update messages.
This is not really a progress 'bar' as it only prints simple messages
indicating progress, not an animated bar.
Is still writes to `sys.stderr` though, not directly to a file.
Use this class if `sys.stderr` is not connected to a terminal.
\warning Even though normal output does not interfere with this progress bar,
it is still better to use FileProgressbar.write() instead of
plain `print()` for uniformity with TerminalProgressbar.
When set up correctly (via `setupLogging()`), loggers handle output properly.
"""
def __init__(self, target, message="", updateRate=1):
r"""!
Construct a new progress bar.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
May be `None` in which case the bar indicates only
general progress without showing how far away the goal is.
\param message A string that is displayed in front of the actual bar
and realted information.
\param updateRate The bar is only redrawn after updateRate number of steps.
"""
super().__init__(message)
self._target = target
self._updateRate = updateRate
self._current = 0
self._lastUpdated = -updateRate
# ETA's __init__ makes sure that target > 0
self._eta = ETA(target) if target else None
# format string for a counter
if target:
targetStr = f"{target:d}"
self._counterFmt = " ({:"+str(len(targetStr))+"d}/"+targetStr+") "
else:
self._counterFmt = " ({:3d}/?)"
def advance(self, amount=1):
"""!Advance the bar by a given amount, redraws the bar."""
self._current += amount
if self._current - self._updateRate >= self._lastUpdated:
self.redraw()
# go to nearest multiple of updateRate less than current
self._lastUpdated = (self._current // self._updateRate)*self._updateRate
def clear(self):
"""!Do nothing, cannot easily erase content from files."""
def redraw(self):
"""!Just call draw()."""
self.draw()
def draw(self):
"""!Print progress information."""
# format progress indication string
if self._eta:
eta = self._eta(self._current)
progStr = " ETA: " \
+ (time.strftime("%H:%M:%S", time.localtime(eta)) if eta else "??:??:??")
else:
progStr = ""
# format string after bar
progStr += self._counterFmt.format(self._current)
sys.stderr.write(self._message+progStr+"\n")
sys.stderr.flush()
def write(self, msg):
r"""!
Write out a message.
Just redirects to sys.stderr.
\attention Use exclusively this function to write to the terminal
while a progress bar is being displayed.
"""
sys.stderr.write(msg)
def finalize(self):
"""!Print a message showing the run time."""
sys.stderr.write(f"{self._message} finished after {(time.time()-self._startTime):.1f}s "
"at "+time.strftime("%H:%M:%S", time.localtime())+" \n")
def makeProgressbar(target, message="", updateRate=1):
r"""!
Construct a Progressbar.
Selects either TerminalProgressbar or FilePRogressbar
depending on whether `sys.stderr` is connected to a terminal
or not.
\warning Never have more than one active instance at the same time,
they interfere with each other.
\param target Targeted number of iterations.
\param message String to display with the progressbar.
\param updateRate The bar is only redrawn after updateRate number of steps.
Only used when writing to file.
\returns A new Progressbar instance.
"""
if stderrConnectedToTerm():
return TerminalProgressbar(target, message)
return FileProgressbar(target, message, updateRate)
@contextlib.contextmanager
def trackProgress(target, message="", updateRate=1):
r"""!
A context manager to track progress of an operation via a progress bar.
Sets up and returns a new progress bar.
The caller needs to advance that bar themselves.
\param target Target number of steps to track.
Can be `None` in which case the bar only indicates that something,
happens, not how far away the goal is.
\param message Message to display in front of the progress bar.
\returns A newly constructed instance of Progressbar.
\throws RuntimeError is a progress bar is already active.
"""
global _activeBar
if _activeBar is not None:
logging.getLogger(__name__).error("Cannot construct a new progress bar, "
"another one is already active.")
raise RuntimeError("A progress bar is already active.")
try:
_activeBar = makeProgressbar(target, message, updateRate)
yield _activeBar
_activeBar.finalize() # success => clean up
except:
# failure => leave bar visible and advance a line
sys.stderr.write("\n")
raise
finally:
# in any case the bar is now done
_activeBar = None
def progressRange(start, stop=None, step=1, message="", updateRate=1):
r"""!
Like built in `range()` but indicates progress using a Progressbar.
Parameters `start`, `stop`, and `step` behave the same way as for
the built in `range()` generator.
`message` is displayed in front of the progress bar.
"""
# mimic behavior of built in range
if stop is None:
stop = start
start = 0
with trackProgress(stop-start, message, updateRate) as pbar:
for cur in range(start, stop, step):
yield cur
# advance only up to stop
# If stop-start is not a multiple of step, advance(step) would overshoot.
pbar.advance(min(step, stop-cur))
########################################################################
# Logging
#
class ProgressStream:
"""!
A barbones output stream that writes to `sys.stderr` or
directs the output through a progress bar if one is active.
"""
def write(self, msg):
"""!Write msg to `sys.stderr`."""
if _activeBar is not None:
# The c++ logger sends spurious empty lines,
# just gobble them up.
if msg.strip():
_activeBar.write(msg)
else:
sys.stderr.write(msg)
class ColorFormatter(logging.Formatter):
"""!
A logging formatter that uses colors for loglevel and logger name.
Colors are encoded using ANSI escape sequences. If they are not supported,
the output shows extra characters. You should therefore pick a different
formatter if you write to file.
"""
## Colorized level names.
LEVELNAMES = {
logging.DEBUG: "[94mDEBUG[0m",
logging.INFO: "INFO",
logging.WARNING: "[33mWARNING[0m",
logging.ERROR: "[31mERROR[0m",
logging.CRITICAL: "[91mCRITICAL[0m",
}
def format(self, record):
"!Format a record using colors."
# Hack the colors into the record itself and let super do the heavy lifting.
record.levelname = self.LEVELNAMES[record.levelno]
record.name = f"[1m{record.name}[0m"
return super().format(record)
def _suppressGoogleLogWarning():
"""
Suppress warning emitted by absl.logging
'WARNING: Logging before flag parsing goes to stderr.'
Does nothing if abseil-py is not installed.
"""
try:
# Tensorflow uses Google's abseil-py library, which uses a Google-specific
# wrapper for logging. That wrapper will write a warning to sys.stderr if
# the Google command-line flags library has not been initialized.
#
# https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825
#
# We don't want this here because we have our own logging setup.
import absl.logging
# https://github.com/abseil/abseil-py/issues/99
logging.root.removeHandler(absl.logging._absl_handler)
# https://github.com/abseil/abseil-py/issues/102
absl.logging._warn_preinit_stderr = False
except Exception:
pass
def setupLogging(logfile=None, verbosity=0):
r"""!
Set up Python's logging framework.
The root logger is set up to output to terminal and file.
The former shows colored output if `sys.stderr` is connected to a terminal
and is aware of Progressbar.
\param logfile Write log to this file.
If `None`, no file output is performed.
\param verbosity Set logging level.
The minimum level for each handler is
`verbosity` | terminal | file
----------- | -------- | ----
0 | WARNING | INFO
1 | INFO | INFO
2 | DEBUG | DEBUG
\throws RuntimeError if this function is called more than once.
It is safe to discard this exception,
logging will still be set up properly.
"""
if setupLogging.isSetUp:
logging.getLogger(__name__).error("Called setupLogging a second time."
"This function must be called *exactly* once.")
raise RuntimeError("Logging already set up")
_suppressGoogleLogWarning()
if verbosity > 2:
# can't be any noisier than that
verbosity = 2
# need at least this level so all messages get out
minLoglevel = logging.DEBUG if verbosity == 2 else logging.INFO
# No need for keeping track of threads in Python.
# In C++, all bets are off.
logging.logThreads = 0
# configure the root logger
logger = logging.getLogger("")
logger.setLevel(minLoglevel)
if logfile:
# output to file at least at level INFO and w/o colors
fh = logging.FileHandler(logfile, "w")
fh.setLevel(minLoglevel)
fh.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%Y-%m-%d %H:%M:%S"))
logger.addHandler(fh)
# and output to STDERR based on verbosity and possibly w/ colors
ch = logging.StreamHandler(stream=ProgressStream())
ch.setLevel((logging.WARNING, logging.INFO, logging.DEBUG)[verbosity])
if stderrConnectedToTerm():
ch.setFormatter(ColorFormatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
else:
ch.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s in %(name)s: %(message)s",
"%H:%M:%S"))
logger.addHandler(ch)
# done => Never run this code again!
setupLogging.isSetUp = True
setupLogging.isSetUp = False
########################################################################
# Argument parsing
#
def makeDefaultParser(defaultLog="none", **kwargs):
r"""!
Construct and return a new argument parser with the default arguments.
See isle.cli.addDefaultArgs().
\param defaultLog Default log file in case user does not supply --log argument.
\param **kwargs Passed to constructor of ArgumentParser.
"""
return addDefaultArgs(argparse.ArgumentParser(**kwargs), defaultLog)
def addDefaultArgs(parser, defaultLog="none"):
"""!Add default arguments common to all commands."""
parser.add_argument("--version", action="version",
version=f"Isle {isle.__version__}")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Make output more verbose, stacks.")
parser.add_argument("--log", default=defaultLog,
help="Specify log file name. Set to none to not write log file.")
return parser
def addContinueArgs(parser):
"""!Add arguments for continuation run to parser."""
parser.add_argument("infile", help="Input file.", type=Path)
parser.add_argument("-o", "--output", help="Output file",
type=Path, dest="outfile")
parser.add_argument("-i", "--initial", type=int, default=-1,
help="Initial checkpoint for HMC")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing output files")
parser.add_argument("-s", "--save-freq", type=int, default=None,
help="Save configurations every s trajectories, "
"computed from infile by default")
parser.add_argument("-c", "--checkpoint-freq", type=int, default=None,
help="Save checkpoints every c trajectories, "
"computed from infile by default")
requiredGrp = parser.add_argument_group("required named arguments")
requiredGrp.add_argument("-n", "--ntrajectories", type=int, required=True,
help="Number of trajectories to produce")
return parser
def addMeasArgs(parser):
"""!Add arguments for measurements to parser."""
parser.add_argument("infile", help="Input file", type=Path)
parser.add_argument("-o", "--output", help="Output file", type=Path, dest="outfile")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing output file.")
return parser
def addShowArgs(parser):
"""!Add arguments for reporting to parser."""
reporters = ["overview", "lattice", "correlator", "tuning"]
class _ReportAction(argparse.Action):
"""!custom action to parse reporters."""
def __call__(self, parser, namespace, values, option_string=None):
if "all" in values:
setattr(namespace, self.dest, reporters)
else:
setattr(namespace, self.dest, values.split(","))
parser.add_argument("input", help="Input file", nargs="+", type=Path)
parser.add_argument("-r", "--report", action=_ReportAction, metavar="", default=["overview"],
help="Comma separated list of reporters to use. Allowed values are ["
+",".join(reporters)+",all] Defaults to overview.")
return parser
def _makeParser(argParser, **kwargs):
"""!Make an argument parser from given command name and keyword arguments."""
cmdArgMap = {"continue": addContinueArgs,
"meas": addMeasArgs,
"show": addShowArgs,
"default": lambda parser: parser}
defaultLog = {"continue": "isle.hmc.log",
"meas": "isle.meas.log",
"show": "none",
"default": "isle.log"}
if not argParser in cmdArgMap:
# this is pre logging setup => do it the ugly way
print(f"Error: requested argParser name not supported: {argParser}")
raise ValueError("Error: requested argParser name not supported")
return cmdArgMap[argParser](makeDefaultParser(defaultLog=defaultLog[argParser], **kwargs))
########################################################################
# The one function to control all the rest.
#
def init(argParser="default", defaultLog=None, verbosity=0, **kwargs):
r"""!
Initialize command line interface.
This function must be called before any I/O as it sets up the logging framework.
\param argParser Command line argument parser. Can be
- `argparse.ArgumentParser`: Use this parser as is.
- `str`: Construct a parser based on this command name.
See `add*Args` functions.
- `None`: Don't parse any command line arguments.
Log file and verbosity are set to the values provided in
corresponding function parameters.
\param defaultLog Log file to use if `argParser is None`.
\param verbosity Output verbisity level to use if `argParser is None`.
\param **kwargs Passed to isle.cli.makeDefaultParser() if `argParser` is a string.
\returns Parsed arguments.
"""
if argParser is not None: | args = argParser.parse_args()
defaultLog = None if not hasattr(args, "log") or args.log.lower() == "none" else args.log
verbosity = args.verbose if hasattr(args, "verbose") else 0
else:
# don't parse anything, use default values
args = None
setupLogging(defaultLog, verbosity=verbosity)
return args | if isinstance(argParser, str):
# construct new parser based on command name
args = _makeParser(argParser, **kwargs).parse_args()
else:
# use provided parser | random_line_split |
report.rs | //! Coverage report generation.
//!
//! # Template directory structure
//!
//! The coverage report produce two classes of files:
//!
//! * One summary page
//! * Source file pages, one page per source.
//!
//! `cargo cov` uses [Tera templates](https://github.com/Keats/tera#readme). Template files are stored using this
//! directory structure:
//!
//! ```text
//! cargo-cov/res/templates/«name»/
//! config.toml
//! tera/
//! summary_template.ext
//! file_template.ext
//! ...
//! static/
//! common.css
//! common.js
//! ...
//! ```
//!
//! When rendered, the output will have this structure:
//!
//! ```text
//! /path/to/workspace/target/cov/report/
//! static/
//! common.css
//! common.js
//! ...
//! summary.ext
//! file_123.ext
//! file_124.ext
//! ...
//! ```
//!
//! # Summary page
//!
//! If a summary page is needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "summary_template.ext"
//! output = "summary.ext"
//! ```
//!
//! The summary page will be rendered to the file `summary.ext` using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "files": [
//! {
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! "lines_covered": 499,
//! "branches_count": 700,
//! "branches_executed": 650,
//! "branches_taken": 520,
//! "functions_count": 40,
//! "functions_called": 39
//! }
//! },
//! ...
//! ]
//! }
//! ```
//!
//! # File pages
//!
//! If the file pages are needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "file_template.ext"
//! output = "file_{{ symbol }}.ext"
//! ```
//!
//! The output filename itself is a Tera template. The file pages will be rendered using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! ...
//! },
//! "lines": [
//! {
//! "line": 1,
//! "source": "/// First line of the source code",
//! "count": null,
//! "branches": []
//! },
//! {
//! "line": 2,
//! "source": "pub fn second_line_of_source_code() {",
//! "count": 12,
//! "branches": [
//! {
//! "count": 6,
//! "symbol": 456,
//! "path": "/path/to/workspace/src/lib.rs",
//! "line": 3,
//! "column: 0
//! },
//! ...
//! ]
//! },
//! ...
//! ],
//! "functions": [
//! {
//! "symbol": 789,
//! "name": "_ZN10crate_name26second_line_of_source_code17hce04ea776f1a67beE",
//! "line": 2,
//! "column": 0,
//! "summary": {
//! "blocks_count": 100,
//! "blocks_executed": 90,
//! "entry_count": 12,
//! "exit_count": 10,
//! "branches_count": 250,
//! "branches_executed": 225,
//! "branches_taken": 219
//! }
//! },
//! ...
//! ]
//! }
//! ```
use error::{Result, ResultExt};
use sourcepath::{SourceType, identify_source_path};
use template::new as new_template;
use utils::{clean_dir, parent_3};
use copy_dir::copy_dir;
use cov::{self, Gcov, Graph, Interner, Report, Symbol};
use serde_json::Value;
use tera::{Context, Tera};
use std::ffi::OsStr;
use std::fs::{File, create_dir_all, read_dir};
use std::io::{BufRead, BufReader, Read, Write};
use std::path::{Path, PathBuf};
/// Entry point of `cargo cov report` subcommand. Renders the coverage report using a template.
pub fn generate(cov_build_path: &Path, template_name: &OsStr, allowed_source_types: SourceType) -> Result<Option<PathBuf>> {
let report_path = cov_build_path.with_file_name("report");
clean_dir(&report_path).chain_err(|| "Cannot clean report directory")?;
create_dir_all(&report_path)?;
let mut interner = Interner::new();
let graph = create_graph(cov_build_path, &mut interner).chain_err(|| "Cannot create graph")?;
let report = graph.report();
render(&report_path, template_name, allowed_source_types, &report, &interner).chain_err(|| "Cannot render report")
}
/// Creates an analyzed [`Graph`] from all GCNO and GCDA inside the `target/cov/build` folder.
///
/// [`Graph`]: ../../cov/graph/struct.Graph.html
fn create_graph(cov_build_path: &Path, interner: &mut Interner) -> cov::Result<Graph> {
let mut graph = Graph::default();
for extension in &["gcno", "gcda"] {
progress!("Parsing", "*.{} files", extension);
for entry in read_dir(cov_build_path.join(extension))? {
let path = entry?.path();
if path.extension() == Some(OsStr::new(extension)) {
trace!("merging {} {:?}", extension, path);
graph.merge(Gcov::open(path, interner)?)?;
}
}
}
graph.analyze();
Ok(graph)
}
/// Renders the `report` into `report_path` using a template.
///
/// If the template has a summary page, returns the path of the rendered summary.
fn render(report_path: &Path, template_name: &OsStr, allowed_source_types: SourceType, report: &Report, interner: &Interner) -> Result<Option<PathBuf>> {
use toml::de::from_slice;
let mut template_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
template_path.push("res");
template_path.push("templates");
template_path.push(template_name);
trace!("using templates at {:?}", template_path);
// Read the template configuration.
template_path.push("config.toml");
let mut config_file = File::open(&template_path).chain_err(|| format!("Cannot open template at `{}`", template_path.display()))?;
let mut config_bytes = Vec::new();
config_file.read_to_end(&mut config_bytes)?;
let config: Config = from_slice(&config_bytes).chain_err(|| "Cannot read template configuration")?;
// Copy the static resources if exist.
template_path.set_file_name("static");
if template_path.is_dir() {
copy_dir(&template_path, report_path.join("static"))?;
}
template_path.set_file_name("tera");
template_path.push("*");
// The report path is at $crate/target/cov/report, so we call .parent() three times.
let crate_path = parent_3(report_path).to_string_lossy();
let mut tera = new_template(template_path.to_str().expect("UTF-8 template path"))?;
let mut report_files = report
.files
.iter()
.filter_map(|(&symbol, file)| {
let path = &interner[symbol];
let source_type = identify_source_path(path, &crate_path).0;
if allowed_source_types.contains(source_type) {
Some(ReportFileEntry {
symbol,
source_type,
path,
file,
})
} else {
None
}
})
.collect::<Vec<_>>();
report_files.sort_by_key(|entry| (entry.source_type, entry.path));
let summary_path = if let Some(summary) = config.summary {
Some(write_summary(report_path, &report_files, &tera, &crate_path, &summary).chain_err(|| "Cannot write summary")?)
} else {
None
};
if let Some(files_config) = config.files {
tera.add_raw_template("<filename>", files_config.output)?;
for entry in &report_files {
write_file(report_path, interner, entry, &tera, &crate_path, files_config.template).chain_err(|| format!("Cannot write file at `{}`", entry.path))?;
}
}
Ok(summary_path)
}
struct ReportFileEntry<'a> {
symbol: Symbol,
source_type: SourceType,
path: &'a str,
file: &'a ::cov::report::File,
}
#[derive(Deserialize, Debug)]
struct Config<'a> {
#[serde(borrow)]
summary: Option<FileConfig<'a>>,
#[serde(borrow)]
files: Option<FileConfig<'a>>,
}
#[derive(Deserialize, Debug)]
struct FileConfig<'a> {
#[serde(borrow)]
output: &'a str,
#[serde(borrow)]
template: &'a str,
}
/// Renders the summary page.
fn wr | eport_path: &Path, report_files: &[ReportFileEntry], tera: &Tera, crate_path: &str, config: &FileConfig) -> Result<PathBuf> {
let path = report_path.join(config.output);
let mut context = Context::new();
let files = report_files
.iter()
.map(|entry| {
json!({
"symbol": entry.symbol,
"path": entry.path,
"summary": entry.file.summary(),
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("files", &files);
let rendered = tera.render(config.template, &context)?;
let mut summary_file = File::create(&path)?;
summary_file.write_all(rendered.as_bytes())?;
progress!("Created", "{}", path.display());
Ok(path)
}
/// Renders report for a source path.
fn write_file(report_path: &Path, interner: &Interner, entry: &ReportFileEntry, tera: &Tera, crate_path: &str, template_name: &str) -> Result<()> {
let mut context = Context::new();
let mut lines = Vec::new();
let mut source_line_number = 1;
// Read the source file.
if let Ok(source_file) = File::open(entry.path) {
let source_file = BufReader::new(source_file);
for source_line in source_file.lines() {
let (count, branches) = if let Some(line) = entry.file.lines.get(&source_line_number) {
let (count, branches) = serialize_line(line, interner);
(Some(count), branches)
} else {
(None, Vec::new())
};
lines.push(json!({
"line": source_line_number,
"source": source_line?,
"count": count,
"branches": branches,
}));
source_line_number += 1;
}
}
// Add the remaining lines absent from the source file.
lines.extend(entry.file.lines.range(source_line_number..).map(|(line_number, line)| {
let (count, branches) = serialize_line(line, interner);
json!({
"line": *line_number,
"count": Some(count),
"source": Value::Null,
"branches": branches,
})
}));
// Collect function info
let functions = entry
.file
.functions
.iter()
.map(|f| {
let name = &interner[f.name];
json!({
"symbol": f.name,
"name": name,
"line": f.line,
"column": f.column,
"summary": &f.summary,
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("symbol", &entry.symbol);
context.add("path", &entry.path);
context.add("summary", &entry.file.summary());
context.add("lines", &lines);
context.add("functions", &functions);
let filename = tera.render("<filename>", &context)?;
let path = report_path.join(filename);
let rendered = tera.render(template_name, &context)?;
let mut file_file = File::create(path)?;
file_file.write_all(rendered.as_bytes())?;
Ok(())
}
/// Serializes a source line as a branch target into JSON value.
fn serialize_line(line: &::cov::report::Line, interner: &Interner) -> (u64, Vec<Value>) {
(
line.count,
line.branches
.iter()
.map(|branch| {
json!({
"count": branch.count,
"symbol": branch.filename,
"path": &interner[branch.filename],
"line": branch.line,
"column": branch.column,
})
})
.collect(),
)
}
| ite_summary(r | identifier_name |
report.rs | //! Coverage report generation.
//!
//! # Template directory structure
//!
//! The coverage report produce two classes of files:
//!
//! * One summary page
//! * Source file pages, one page per source.
//!
//! `cargo cov` uses [Tera templates](https://github.com/Keats/tera#readme). Template files are stored using this
//! directory structure:
//!
//! ```text
//! cargo-cov/res/templates/«name»/
//! config.toml
//! tera/
//! summary_template.ext
//! file_template.ext
//! ...
//! static/
//! common.css
//! common.js
//! ...
//! ```
//!
//! When rendered, the output will have this structure:
//!
//! ```text
//! /path/to/workspace/target/cov/report/
//! static/
//! common.css
//! common.js
//! ...
//! summary.ext
//! file_123.ext
//! file_124.ext
//! ...
//! ```
//!
//! # Summary page
//!
//! If a summary page is needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "summary_template.ext"
//! output = "summary.ext"
//! ```
//!
//! The summary page will be rendered to the file `summary.ext` using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "files": [
//! {
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! "lines_covered": 499,
//! "branches_count": 700,
//! "branches_executed": 650,
//! "branches_taken": 520,
//! "functions_count": 40,
//! "functions_called": 39
//! }
//! },
//! ...
//! ]
//! }
//! ```
//!
//! # File pages
//!
//! If the file pages are needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "file_template.ext"
//! output = "file_{{ symbol }}.ext"
//! ```
//!
//! The output filename itself is a Tera template. The file pages will be rendered using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! ...
//! },
//! "lines": [
//! {
//! "line": 1,
//! "source": "/// First line of the source code",
//! "count": null,
//! "branches": []
//! },
//! {
//! "line": 2,
//! "source": "pub fn second_line_of_source_code() {",
//! "count": 12,
//! "branches": [
//! {
//! "count": 6,
//! "symbol": 456,
//! "path": "/path/to/workspace/src/lib.rs",
//! "line": 3,
//! "column: 0
//! },
//! ...
//! ]
//! },
//! ...
//! ],
//! "functions": [
//! {
//! "symbol": 789,
//! "name": "_ZN10crate_name26second_line_of_source_code17hce04ea776f1a67beE",
//! "line": 2,
//! "column": 0,
//! "summary": {
//! "blocks_count": 100,
//! "blocks_executed": 90,
//! "entry_count": 12,
//! "exit_count": 10, | //! }
//! },
//! ...
//! ]
//! }
//! ```
use error::{Result, ResultExt};
use sourcepath::{SourceType, identify_source_path};
use template::new as new_template;
use utils::{clean_dir, parent_3};
use copy_dir::copy_dir;
use cov::{self, Gcov, Graph, Interner, Report, Symbol};
use serde_json::Value;
use tera::{Context, Tera};
use std::ffi::OsStr;
use std::fs::{File, create_dir_all, read_dir};
use std::io::{BufRead, BufReader, Read, Write};
use std::path::{Path, PathBuf};
/// Entry point of `cargo cov report` subcommand. Renders the coverage report using a template.
pub fn generate(cov_build_path: &Path, template_name: &OsStr, allowed_source_types: SourceType) -> Result<Option<PathBuf>> {
let report_path = cov_build_path.with_file_name("report");
clean_dir(&report_path).chain_err(|| "Cannot clean report directory")?;
create_dir_all(&report_path)?;
let mut interner = Interner::new();
let graph = create_graph(cov_build_path, &mut interner).chain_err(|| "Cannot create graph")?;
let report = graph.report();
render(&report_path, template_name, allowed_source_types, &report, &interner).chain_err(|| "Cannot render report")
}
/// Creates an analyzed [`Graph`] from all GCNO and GCDA inside the `target/cov/build` folder.
///
/// [`Graph`]: ../../cov/graph/struct.Graph.html
fn create_graph(cov_build_path: &Path, interner: &mut Interner) -> cov::Result<Graph> {
let mut graph = Graph::default();
for extension in &["gcno", "gcda"] {
progress!("Parsing", "*.{} files", extension);
for entry in read_dir(cov_build_path.join(extension))? {
let path = entry?.path();
if path.extension() == Some(OsStr::new(extension)) {
trace!("merging {} {:?}", extension, path);
graph.merge(Gcov::open(path, interner)?)?;
}
}
}
graph.analyze();
Ok(graph)
}
/// Renders the `report` into `report_path` using a template.
///
/// If the template has a summary page, returns the path of the rendered summary.
fn render(report_path: &Path, template_name: &OsStr, allowed_source_types: SourceType, report: &Report, interner: &Interner) -> Result<Option<PathBuf>> {
use toml::de::from_slice;
let mut template_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
template_path.push("res");
template_path.push("templates");
template_path.push(template_name);
trace!("using templates at {:?}", template_path);
// Read the template configuration.
template_path.push("config.toml");
let mut config_file = File::open(&template_path).chain_err(|| format!("Cannot open template at `{}`", template_path.display()))?;
let mut config_bytes = Vec::new();
config_file.read_to_end(&mut config_bytes)?;
let config: Config = from_slice(&config_bytes).chain_err(|| "Cannot read template configuration")?;
// Copy the static resources if exist.
template_path.set_file_name("static");
if template_path.is_dir() {
copy_dir(&template_path, report_path.join("static"))?;
}
template_path.set_file_name("tera");
template_path.push("*");
// The report path is at $crate/target/cov/report, so we call .parent() three times.
let crate_path = parent_3(report_path).to_string_lossy();
let mut tera = new_template(template_path.to_str().expect("UTF-8 template path"))?;
let mut report_files = report
.files
.iter()
.filter_map(|(&symbol, file)| {
let path = &interner[symbol];
let source_type = identify_source_path(path, &crate_path).0;
if allowed_source_types.contains(source_type) {
Some(ReportFileEntry {
symbol,
source_type,
path,
file,
})
} else {
None
}
})
.collect::<Vec<_>>();
report_files.sort_by_key(|entry| (entry.source_type, entry.path));
let summary_path = if let Some(summary) = config.summary {
Some(write_summary(report_path, &report_files, &tera, &crate_path, &summary).chain_err(|| "Cannot write summary")?)
} else {
None
};
if let Some(files_config) = config.files {
tera.add_raw_template("<filename>", files_config.output)?;
for entry in &report_files {
write_file(report_path, interner, entry, &tera, &crate_path, files_config.template).chain_err(|| format!("Cannot write file at `{}`", entry.path))?;
}
}
Ok(summary_path)
}
struct ReportFileEntry<'a> {
symbol: Symbol,
source_type: SourceType,
path: &'a str,
file: &'a ::cov::report::File,
}
#[derive(Deserialize, Debug)]
struct Config<'a> {
#[serde(borrow)]
summary: Option<FileConfig<'a>>,
#[serde(borrow)]
files: Option<FileConfig<'a>>,
}
#[derive(Deserialize, Debug)]
struct FileConfig<'a> {
#[serde(borrow)]
output: &'a str,
#[serde(borrow)]
template: &'a str,
}
/// Renders the summary page.
fn write_summary(report_path: &Path, report_files: &[ReportFileEntry], tera: &Tera, crate_path: &str, config: &FileConfig) -> Result<PathBuf> {
let path = report_path.join(config.output);
let mut context = Context::new();
let files = report_files
.iter()
.map(|entry| {
json!({
"symbol": entry.symbol,
"path": entry.path,
"summary": entry.file.summary(),
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("files", &files);
let rendered = tera.render(config.template, &context)?;
let mut summary_file = File::create(&path)?;
summary_file.write_all(rendered.as_bytes())?;
progress!("Created", "{}", path.display());
Ok(path)
}
/// Renders report for a source path.
fn write_file(report_path: &Path, interner: &Interner, entry: &ReportFileEntry, tera: &Tera, crate_path: &str, template_name: &str) -> Result<()> {
let mut context = Context::new();
let mut lines = Vec::new();
let mut source_line_number = 1;
// Read the source file.
if let Ok(source_file) = File::open(entry.path) {
let source_file = BufReader::new(source_file);
for source_line in source_file.lines() {
let (count, branches) = if let Some(line) = entry.file.lines.get(&source_line_number) {
let (count, branches) = serialize_line(line, interner);
(Some(count), branches)
} else {
(None, Vec::new())
};
lines.push(json!({
"line": source_line_number,
"source": source_line?,
"count": count,
"branches": branches,
}));
source_line_number += 1;
}
}
// Add the remaining lines absent from the source file.
lines.extend(entry.file.lines.range(source_line_number..).map(|(line_number, line)| {
let (count, branches) = serialize_line(line, interner);
json!({
"line": *line_number,
"count": Some(count),
"source": Value::Null,
"branches": branches,
})
}));
// Collect function info
let functions = entry
.file
.functions
.iter()
.map(|f| {
let name = &interner[f.name];
json!({
"symbol": f.name,
"name": name,
"line": f.line,
"column": f.column,
"summary": &f.summary,
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("symbol", &entry.symbol);
context.add("path", &entry.path);
context.add("summary", &entry.file.summary());
context.add("lines", &lines);
context.add("functions", &functions);
let filename = tera.render("<filename>", &context)?;
let path = report_path.join(filename);
let rendered = tera.render(template_name, &context)?;
let mut file_file = File::create(path)?;
file_file.write_all(rendered.as_bytes())?;
Ok(())
}
/// Serializes a source line as a branch target into JSON value.
fn serialize_line(line: &::cov::report::Line, interner: &Interner) -> (u64, Vec<Value>) {
(
line.count,
line.branches
.iter()
.map(|branch| {
json!({
"count": branch.count,
"symbol": branch.filename,
"path": &interner[branch.filename],
"line": branch.line,
"column": branch.column,
})
})
.collect(),
)
} | //! "branches_count": 250,
//! "branches_executed": 225,
//! "branches_taken": 219 | random_line_split |
report.rs | //! Coverage report generation.
//!
//! # Template directory structure
//!
//! The coverage report produce two classes of files:
//!
//! * One summary page
//! * Source file pages, one page per source.
//!
//! `cargo cov` uses [Tera templates](https://github.com/Keats/tera#readme). Template files are stored using this
//! directory structure:
//!
//! ```text
//! cargo-cov/res/templates/«name»/
//! config.toml
//! tera/
//! summary_template.ext
//! file_template.ext
//! ...
//! static/
//! common.css
//! common.js
//! ...
//! ```
//!
//! When rendered, the output will have this structure:
//!
//! ```text
//! /path/to/workspace/target/cov/report/
//! static/
//! common.css
//! common.js
//! ...
//! summary.ext
//! file_123.ext
//! file_124.ext
//! ...
//! ```
//!
//! # Summary page
//!
//! If a summary page is needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "summary_template.ext"
//! output = "summary.ext"
//! ```
//!
//! The summary page will be rendered to the file `summary.ext` using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "files": [
//! {
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! "lines_covered": 499,
//! "branches_count": 700,
//! "branches_executed": 650,
//! "branches_taken": 520,
//! "functions_count": 40,
//! "functions_called": 39
//! }
//! },
//! ...
//! ]
//! }
//! ```
//!
//! # File pages
//!
//! If the file pages are needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "file_template.ext"
//! output = "file_{{ symbol }}.ext"
//! ```
//!
//! The output filename itself is a Tera template. The file pages will be rendered using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! ...
//! },
//! "lines": [
//! {
//! "line": 1,
//! "source": "/// First line of the source code",
//! "count": null,
//! "branches": []
//! },
//! {
//! "line": 2,
//! "source": "pub fn second_line_of_source_code() {",
//! "count": 12,
//! "branches": [
//! {
//! "count": 6,
//! "symbol": 456,
//! "path": "/path/to/workspace/src/lib.rs",
//! "line": 3,
//! "column: 0
//! },
//! ...
//! ]
//! },
//! ...
//! ],
//! "functions": [
//! {
//! "symbol": 789,
//! "name": "_ZN10crate_name26second_line_of_source_code17hce04ea776f1a67beE",
//! "line": 2,
//! "column": 0,
//! "summary": {
//! "blocks_count": 100,
//! "blocks_executed": 90,
//! "entry_count": 12,
//! "exit_count": 10,
//! "branches_count": 250,
//! "branches_executed": 225,
//! "branches_taken": 219
//! }
//! },
//! ...
//! ]
//! }
//! ```
use error::{Result, ResultExt};
use sourcepath::{SourceType, identify_source_path};
use template::new as new_template;
use utils::{clean_dir, parent_3};
use copy_dir::copy_dir;
use cov::{self, Gcov, Graph, Interner, Report, Symbol};
use serde_json::Value;
use tera::{Context, Tera};
use std::ffi::OsStr;
use std::fs::{File, create_dir_all, read_dir};
use std::io::{BufRead, BufReader, Read, Write};
use std::path::{Path, PathBuf};
/// Entry point of `cargo cov report` subcommand. Renders the coverage report using a template.
pub fn generate(cov_build_path: &Path, template_name: &OsStr, allowed_source_types: SourceType) -> Result<Option<PathBuf>> {
let report_path = cov_build_path.with_file_name("report");
clean_dir(&report_path).chain_err(|| "Cannot clean report directory")?;
create_dir_all(&report_path)?;
let mut interner = Interner::new();
let graph = create_graph(cov_build_path, &mut interner).chain_err(|| "Cannot create graph")?;
let report = graph.report();
render(&report_path, template_name, allowed_source_types, &report, &interner).chain_err(|| "Cannot render report")
}
/// Creates an analyzed [`Graph`] from all GCNO and GCDA inside the `target/cov/build` folder.
///
/// [`Graph`]: ../../cov/graph/struct.Graph.html
fn create_graph(cov_build_path: &Path, interner: &mut Interner) -> cov::Result<Graph> {
| /// Renders the `report` into `report_path` using a template.
///
/// If the template has a summary page, returns the path of the rendered summary.
fn render(report_path: &Path, template_name: &OsStr, allowed_source_types: SourceType, report: &Report, interner: &Interner) -> Result<Option<PathBuf>> {
use toml::de::from_slice;
let mut template_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
template_path.push("res");
template_path.push("templates");
template_path.push(template_name);
trace!("using templates at {:?}", template_path);
// Read the template configuration.
template_path.push("config.toml");
let mut config_file = File::open(&template_path).chain_err(|| format!("Cannot open template at `{}`", template_path.display()))?;
let mut config_bytes = Vec::new();
config_file.read_to_end(&mut config_bytes)?;
let config: Config = from_slice(&config_bytes).chain_err(|| "Cannot read template configuration")?;
// Copy the static resources if exist.
template_path.set_file_name("static");
if template_path.is_dir() {
copy_dir(&template_path, report_path.join("static"))?;
}
template_path.set_file_name("tera");
template_path.push("*");
// The report path is at $crate/target/cov/report, so we call .parent() three times.
let crate_path = parent_3(report_path).to_string_lossy();
let mut tera = new_template(template_path.to_str().expect("UTF-8 template path"))?;
let mut report_files = report
.files
.iter()
.filter_map(|(&symbol, file)| {
let path = &interner[symbol];
let source_type = identify_source_path(path, &crate_path).0;
if allowed_source_types.contains(source_type) {
Some(ReportFileEntry {
symbol,
source_type,
path,
file,
})
} else {
None
}
})
.collect::<Vec<_>>();
report_files.sort_by_key(|entry| (entry.source_type, entry.path));
let summary_path = if let Some(summary) = config.summary {
Some(write_summary(report_path, &report_files, &tera, &crate_path, &summary).chain_err(|| "Cannot write summary")?)
} else {
None
};
if let Some(files_config) = config.files {
tera.add_raw_template("<filename>", files_config.output)?;
for entry in &report_files {
write_file(report_path, interner, entry, &tera, &crate_path, files_config.template).chain_err(|| format!("Cannot write file at `{}`", entry.path))?;
}
}
Ok(summary_path)
}
struct ReportFileEntry<'a> {
symbol: Symbol,
source_type: SourceType,
path: &'a str,
file: &'a ::cov::report::File,
}
#[derive(Deserialize, Debug)]
struct Config<'a> {
#[serde(borrow)]
summary: Option<FileConfig<'a>>,
#[serde(borrow)]
files: Option<FileConfig<'a>>,
}
#[derive(Deserialize, Debug)]
struct FileConfig<'a> {
#[serde(borrow)]
output: &'a str,
#[serde(borrow)]
template: &'a str,
}
/// Renders the summary page.
fn write_summary(report_path: &Path, report_files: &[ReportFileEntry], tera: &Tera, crate_path: &str, config: &FileConfig) -> Result<PathBuf> {
let path = report_path.join(config.output);
let mut context = Context::new();
let files = report_files
.iter()
.map(|entry| {
json!({
"symbol": entry.symbol,
"path": entry.path,
"summary": entry.file.summary(),
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("files", &files);
let rendered = tera.render(config.template, &context)?;
let mut summary_file = File::create(&path)?;
summary_file.write_all(rendered.as_bytes())?;
progress!("Created", "{}", path.display());
Ok(path)
}
/// Renders report for a source path.
fn write_file(report_path: &Path, interner: &Interner, entry: &ReportFileEntry, tera: &Tera, crate_path: &str, template_name: &str) -> Result<()> {
let mut context = Context::new();
let mut lines = Vec::new();
let mut source_line_number = 1;
// Read the source file.
if let Ok(source_file) = File::open(entry.path) {
let source_file = BufReader::new(source_file);
for source_line in source_file.lines() {
let (count, branches) = if let Some(line) = entry.file.lines.get(&source_line_number) {
let (count, branches) = serialize_line(line, interner);
(Some(count), branches)
} else {
(None, Vec::new())
};
lines.push(json!({
"line": source_line_number,
"source": source_line?,
"count": count,
"branches": branches,
}));
source_line_number += 1;
}
}
// Add the remaining lines absent from the source file.
lines.extend(entry.file.lines.range(source_line_number..).map(|(line_number, line)| {
let (count, branches) = serialize_line(line, interner);
json!({
"line": *line_number,
"count": Some(count),
"source": Value::Null,
"branches": branches,
})
}));
// Collect function info
let functions = entry
.file
.functions
.iter()
.map(|f| {
let name = &interner[f.name];
json!({
"symbol": f.name,
"name": name,
"line": f.line,
"column": f.column,
"summary": &f.summary,
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("symbol", &entry.symbol);
context.add("path", &entry.path);
context.add("summary", &entry.file.summary());
context.add("lines", &lines);
context.add("functions", &functions);
let filename = tera.render("<filename>", &context)?;
let path = report_path.join(filename);
let rendered = tera.render(template_name, &context)?;
let mut file_file = File::create(path)?;
file_file.write_all(rendered.as_bytes())?;
Ok(())
}
/// Serializes a source line as a branch target into JSON value.
fn serialize_line(line: &::cov::report::Line, interner: &Interner) -> (u64, Vec<Value>) {
(
line.count,
line.branches
.iter()
.map(|branch| {
json!({
"count": branch.count,
"symbol": branch.filename,
"path": &interner[branch.filename],
"line": branch.line,
"column": branch.column,
})
})
.collect(),
)
}
| let mut graph = Graph::default();
for extension in &["gcno", "gcda"] {
progress!("Parsing", "*.{} files", extension);
for entry in read_dir(cov_build_path.join(extension))? {
let path = entry?.path();
if path.extension() == Some(OsStr::new(extension)) {
trace!("merging {} {:?}", extension, path);
graph.merge(Gcov::open(path, interner)?)?;
}
}
}
graph.analyze();
Ok(graph)
}
| identifier_body |
report.rs | //! Coverage report generation.
//!
//! # Template directory structure
//!
//! The coverage report produce two classes of files:
//!
//! * One summary page
//! * Source file pages, one page per source.
//!
//! `cargo cov` uses [Tera templates](https://github.com/Keats/tera#readme). Template files are stored using this
//! directory structure:
//!
//! ```text
//! cargo-cov/res/templates/«name»/
//! config.toml
//! tera/
//! summary_template.ext
//! file_template.ext
//! ...
//! static/
//! common.css
//! common.js
//! ...
//! ```
//!
//! When rendered, the output will have this structure:
//!
//! ```text
//! /path/to/workspace/target/cov/report/
//! static/
//! common.css
//! common.js
//! ...
//! summary.ext
//! file_123.ext
//! file_124.ext
//! ...
//! ```
//!
//! # Summary page
//!
//! If a summary page is needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "summary_template.ext"
//! output = "summary.ext"
//! ```
//!
//! The summary page will be rendered to the file `summary.ext` using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "files": [
//! {
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! "lines_covered": 499,
//! "branches_count": 700,
//! "branches_executed": 650,
//! "branches_taken": 520,
//! "functions_count": 40,
//! "functions_called": 39
//! }
//! },
//! ...
//! ]
//! }
//! ```
//!
//! # File pages
//!
//! If the file pages are needed, add the following section to `config.toml`:
//!
//! ```toml
//! [summary]
//! template = "file_template.ext"
//! output = "file_{{ symbol }}.ext"
//! ```
//!
//! The output filename itself is a Tera template. The file pages will be rendered using this data:
//!
//! ```json
//! {
//! "crate_path": "/path/to/workspace",
//! "symbol": 123,
//! "path": "/path/to/workspace/src/lib.rs",
//! "summary": {
//! "lines_count": 500,
//! ...
//! },
//! "lines": [
//! {
//! "line": 1,
//! "source": "/// First line of the source code",
//! "count": null,
//! "branches": []
//! },
//! {
//! "line": 2,
//! "source": "pub fn second_line_of_source_code() {",
//! "count": 12,
//! "branches": [
//! {
//! "count": 6,
//! "symbol": 456,
//! "path": "/path/to/workspace/src/lib.rs",
//! "line": 3,
//! "column: 0
//! },
//! ...
//! ]
//! },
//! ...
//! ],
//! "functions": [
//! {
//! "symbol": 789,
//! "name": "_ZN10crate_name26second_line_of_source_code17hce04ea776f1a67beE",
//! "line": 2,
//! "column": 0,
//! "summary": {
//! "blocks_count": 100,
//! "blocks_executed": 90,
//! "entry_count": 12,
//! "exit_count": 10,
//! "branches_count": 250,
//! "branches_executed": 225,
//! "branches_taken": 219
//! }
//! },
//! ...
//! ]
//! }
//! ```
use error::{Result, ResultExt};
use sourcepath::{SourceType, identify_source_path};
use template::new as new_template;
use utils::{clean_dir, parent_3};
use copy_dir::copy_dir;
use cov::{self, Gcov, Graph, Interner, Report, Symbol};
use serde_json::Value;
use tera::{Context, Tera};
use std::ffi::OsStr;
use std::fs::{File, create_dir_all, read_dir};
use std::io::{BufRead, BufReader, Read, Write};
use std::path::{Path, PathBuf};
/// Entry point of `cargo cov report` subcommand. Renders the coverage report using a template.
pub fn generate(cov_build_path: &Path, template_name: &OsStr, allowed_source_types: SourceType) -> Result<Option<PathBuf>> {
let report_path = cov_build_path.with_file_name("report");
clean_dir(&report_path).chain_err(|| "Cannot clean report directory")?;
create_dir_all(&report_path)?;
let mut interner = Interner::new();
let graph = create_graph(cov_build_path, &mut interner).chain_err(|| "Cannot create graph")?;
let report = graph.report();
render(&report_path, template_name, allowed_source_types, &report, &interner).chain_err(|| "Cannot render report")
}
/// Creates an analyzed [`Graph`] from all GCNO and GCDA inside the `target/cov/build` folder.
///
/// [`Graph`]: ../../cov/graph/struct.Graph.html
fn create_graph(cov_build_path: &Path, interner: &mut Interner) -> cov::Result<Graph> {
let mut graph = Graph::default();
for extension in &["gcno", "gcda"] {
progress!("Parsing", "*.{} files", extension);
for entry in read_dir(cov_build_path.join(extension))? {
let path = entry?.path();
if path.extension() == Some(OsStr::new(extension)) {
| }
}
graph.analyze();
Ok(graph)
}
/// Renders the `report` into `report_path` using a template.
///
/// If the template has a summary page, returns the path of the rendered summary.
fn render(report_path: &Path, template_name: &OsStr, allowed_source_types: SourceType, report: &Report, interner: &Interner) -> Result<Option<PathBuf>> {
use toml::de::from_slice;
let mut template_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
template_path.push("res");
template_path.push("templates");
template_path.push(template_name);
trace!("using templates at {:?}", template_path);
// Read the template configuration.
template_path.push("config.toml");
let mut config_file = File::open(&template_path).chain_err(|| format!("Cannot open template at `{}`", template_path.display()))?;
let mut config_bytes = Vec::new();
config_file.read_to_end(&mut config_bytes)?;
let config: Config = from_slice(&config_bytes).chain_err(|| "Cannot read template configuration")?;
// Copy the static resources if exist.
template_path.set_file_name("static");
if template_path.is_dir() {
copy_dir(&template_path, report_path.join("static"))?;
}
template_path.set_file_name("tera");
template_path.push("*");
// The report path is at $crate/target/cov/report, so we call .parent() three times.
let crate_path = parent_3(report_path).to_string_lossy();
let mut tera = new_template(template_path.to_str().expect("UTF-8 template path"))?;
let mut report_files = report
.files
.iter()
.filter_map(|(&symbol, file)| {
let path = &interner[symbol];
let source_type = identify_source_path(path, &crate_path).0;
if allowed_source_types.contains(source_type) {
Some(ReportFileEntry {
symbol,
source_type,
path,
file,
})
} else {
None
}
})
.collect::<Vec<_>>();
report_files.sort_by_key(|entry| (entry.source_type, entry.path));
let summary_path = if let Some(summary) = config.summary {
Some(write_summary(report_path, &report_files, &tera, &crate_path, &summary).chain_err(|| "Cannot write summary")?)
} else {
None
};
if let Some(files_config) = config.files {
tera.add_raw_template("<filename>", files_config.output)?;
for entry in &report_files {
write_file(report_path, interner, entry, &tera, &crate_path, files_config.template).chain_err(|| format!("Cannot write file at `{}`", entry.path))?;
}
}
Ok(summary_path)
}
struct ReportFileEntry<'a> {
symbol: Symbol,
source_type: SourceType,
path: &'a str,
file: &'a ::cov::report::File,
}
#[derive(Deserialize, Debug)]
struct Config<'a> {
#[serde(borrow)]
summary: Option<FileConfig<'a>>,
#[serde(borrow)]
files: Option<FileConfig<'a>>,
}
#[derive(Deserialize, Debug)]
struct FileConfig<'a> {
#[serde(borrow)]
output: &'a str,
#[serde(borrow)]
template: &'a str,
}
/// Renders the summary page.
fn write_summary(report_path: &Path, report_files: &[ReportFileEntry], tera: &Tera, crate_path: &str, config: &FileConfig) -> Result<PathBuf> {
let path = report_path.join(config.output);
let mut context = Context::new();
let files = report_files
.iter()
.map(|entry| {
json!({
"symbol": entry.symbol,
"path": entry.path,
"summary": entry.file.summary(),
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("files", &files);
let rendered = tera.render(config.template, &context)?;
let mut summary_file = File::create(&path)?;
summary_file.write_all(rendered.as_bytes())?;
progress!("Created", "{}", path.display());
Ok(path)
}
/// Renders report for a source path.
fn write_file(report_path: &Path, interner: &Interner, entry: &ReportFileEntry, tera: &Tera, crate_path: &str, template_name: &str) -> Result<()> {
let mut context = Context::new();
let mut lines = Vec::new();
let mut source_line_number = 1;
// Read the source file.
if let Ok(source_file) = File::open(entry.path) {
let source_file = BufReader::new(source_file);
for source_line in source_file.lines() {
let (count, branches) = if let Some(line) = entry.file.lines.get(&source_line_number) {
let (count, branches) = serialize_line(line, interner);
(Some(count), branches)
} else {
(None, Vec::new())
};
lines.push(json!({
"line": source_line_number,
"source": source_line?,
"count": count,
"branches": branches,
}));
source_line_number += 1;
}
}
// Add the remaining lines absent from the source file.
lines.extend(entry.file.lines.range(source_line_number..).map(|(line_number, line)| {
let (count, branches) = serialize_line(line, interner);
json!({
"line": *line_number,
"count": Some(count),
"source": Value::Null,
"branches": branches,
})
}));
// Collect function info
let functions = entry
.file
.functions
.iter()
.map(|f| {
let name = &interner[f.name];
json!({
"symbol": f.name,
"name": name,
"line": f.line,
"column": f.column,
"summary": &f.summary,
})
})
.collect::<Vec<_>>();
context.add("crate_path", &crate_path);
context.add("symbol", &entry.symbol);
context.add("path", &entry.path);
context.add("summary", &entry.file.summary());
context.add("lines", &lines);
context.add("functions", &functions);
let filename = tera.render("<filename>", &context)?;
let path = report_path.join(filename);
let rendered = tera.render(template_name, &context)?;
let mut file_file = File::create(path)?;
file_file.write_all(rendered.as_bytes())?;
Ok(())
}
/// Serializes a source line as a branch target into JSON value.
fn serialize_line(line: &::cov::report::Line, interner: &Interner) -> (u64, Vec<Value>) {
(
line.count,
line.branches
.iter()
.map(|branch| {
json!({
"count": branch.count,
"symbol": branch.filename,
"path": &interner[branch.filename],
"line": branch.line,
"column": branch.column,
})
})
.collect(),
)
}
| trace!("merging {} {:?}", extension, path);
graph.merge(Gcov::open(path, interner)?)?;
}
| conditional_block |
main.rs | #[cfg(feature = "pulse")] extern crate libpulse_sys;
#[cfg(feature = "tokio")] extern crate ctrlc;
#[cfg(feature = "tokio")] extern crate futures;
#[cfg(feature = "tokio")] extern crate tokio_core;
#[cfg(feature = "tokio")] extern crate tokio_io;
#[cfg(feature = "tokio")] extern crate tokio_uds;
#[macro_use] extern crate clap;
extern crate x11;
#[cfg(feature = "pulse")] mod pulse;
#[cfg(feature = "pulse")] use libpulse_sys::context::*;
#[cfg(feature = "pulse")] use libpulse_sys::context::pa_context;
#[cfg(feature = "pulse")] use libpulse_sys::context::subscribe::pa_subscription_event_type_t;
#[cfg(feature = "pulse")] use libpulse_sys::mainloop::threaded::*;
#[cfg(feature = "pulse")] use pulse::PulseAudio;
#[cfg(feature = "tokio")] use futures::future::{self, Loop};
#[cfg(feature = "tokio")] use futures::sync::mpsc;
#[cfg(feature = "tokio")] use futures::{Future, Sink, Stream};
#[cfg(feature = "tokio")] use std::cell::RefCell;
#[cfg(feature = "tokio")] use std::fs;
#[cfg(feature = "tokio")] use std::io::ErrorKind;
#[cfg(feature = "tokio")] use std::rc::Rc;
#[cfg(feature = "tokio")] use tokio_core::reactor::{Core, Timeout};
#[cfg(feature = "tokio")] use tokio_io::io;
#[cfg(feature = "tokio")] use tokio_uds::UnixListener;
#[cfg(not(feature = "tokio"))] use std::thread;
use clap::{App as ClapApp, Arg};
use std::ffi::CString;
use std::os::raw::c_void;
use std::process::Command;
use std::{mem, ptr};
use std::time::Duration;
use x11::xlib::{Display, XA_ATOM, XCloseDisplay, XDefaultRootWindow, XFree, XGetInputFocus, XGetWindowProperty,
XInternAtom, XOpenDisplay};
use x11::xss::{XScreenSaverAllocInfo, XScreenSaverInfo, XScreenSaverQueryInfo};
struct DeferXClose(*mut Display);
impl Drop for DeferXClose {
fn drop(&mut self) {
unsafe { XCloseDisplay(self.0); }
}
}
struct DeferXFree(*mut c_void);
impl Drop for DeferXFree {
fn drop(&mut self) {
unsafe { XFree(self.0); }
}
}
const SCALE: u64 = 60; // Second:minute scale. Can be changed for debugging purposes.
#[cfg(feature = "tokio")] const COMMAND_DEACTIVATE: u8 = 0;
#[cfg(feature = "tokio")] const COMMAND_ACTIVATE: u8 = 1;
#[cfg(feature = "tokio")] const COMMAND_TRIGGER: u8 = 2;
fn main() {
let success = do_main();
std::process::exit(if success { 0 } else { 1 });
}
fn do_main() -> bool {
let clap_app = ClapApp::new(crate_name!())
.author(crate_authors!())
.version(crate_version!())
// Flags
.arg(
Arg::with_name("print")
.help("Print the idle time to standard output. This is similar to xprintidle.")
.long("print")
)
.arg(
Arg::with_name("not-when-fullscreen")
.help("Don't invoke the timer when the current application is fullscreen. \
Useful for preventing the lockscreen when watching videos")
.long("not-when-fullscreen")
.conflicts_with("print")
)
.arg(
Arg::with_name("once")
.help("Exit after timer command has been invoked once. \
This does not include manual invoking using the socket.")
.long("once")
.conflicts_with("print")
)
// Options
.arg(
Arg::with_name("time")
.help("Set the required amount of idle minutes before invoking timer")
.long("time")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("timer")
.help("Set command to run when the timer goes off")
.long("timer")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("notify")
.help("Run the command passed by --notifier _ seconds before timer goes off")
.long("notify")
.takes_value(true)
.requires("notifier")
.conflicts_with("print")
)
.arg(
Arg::with_name("notifier")
.help("Set the command to run when notifier goes off (see --notify)")
.long("notifier")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
)
.arg(
Arg::with_name("canceller")
.help("Set the command to run when user cancels the timer after the notifier has already gone off")
.long("canceller")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
);
#[cfg(feature = "tokio")]
let mut clap_app = clap_app; // make mutable
#[cfg(feature = "tokio")] {
clap_app = clap_app
.arg(
Arg::with_name("socket")
.help("Listen to events over a unix socket")
.long("socket")
.takes_value(true)
.conflicts_with("print")
);
}
#[cfg(feature = "pulse")] {
clap_app = clap_app
.arg(
Arg::with_name("not-when-audio")
.help("Don't invoke the timer when any audio is playing (PulseAudio specific)")
.long("not-when-audio")
.conflicts_with("print")
);
}
let matches = clap_app.get_matches();
let display = unsafe { XOpenDisplay(ptr::null()) };
if display.is_null() {
eprintln!("failed to open x server");
return false;
}
let _cleanup = DeferXClose(display);
let info = unsafe { XScreenSaverAllocInfo() };
let _cleanup = DeferXFree(info as *mut c_void);
if matches.is_present("print") {
if let Ok(idle) = get_idle(display, info) {
println!("{}", idle);
}
return true;
}
let time = value_t_or_exit!(matches, "time", u32) as u64 * SCALE;
let app = App {
active: true,
audio: false,
delay: Duration::from_secs(SCALE),
display: display,
info: info,
not_when_fullscreen: matches.is_present("not-when-fullscreen"),
once: matches.is_present("once"),
time: time,
timer: matches.value_of("timer").unwrap().to_string(),
notify: value_t!(matches, "notify", u32).ok().map(|notify| notify as u64),
notifier: matches.value_of("notifier").map(String::from),
canceller: matches.value_of("canceller").map(String::from),
ran_notify: false,
ran_timer: false,
fullscreen: None,
time_new: time
};
#[cfg(not(feature = "tokio"))] {
let mut app = app;
loop {
if let Some(exit) = app.step() {
return exit;
}
thread::sleep(app.delay);
}
}
#[cfg(feature = "tokio")] {
#[cfg(feature = "pulse")]
let not_when_audio = matches.is_present("not-when-audio");
let socket = matches.value_of("socket");
let app = Rc::new(RefCell::new(app));
let mut core = Core::new().unwrap();
let handle = Rc::new(core.handle());
let (tx_stop, rx_stop) = mpsc::channel(1);
let tx_stop = Some(tx_stop);
let tx_stop_clone = RefCell::new(tx_stop.clone());
if let Err(err) =
ctrlc::set_handler(move || {
if let Some(tx_stop) = tx_stop_clone.borrow_mut().take() {
tx_stop.send(()).wait().unwrap();
}
}) {
eprintln!("failed to create signal handler: {}", err);
}
if let Some(socket) = socket {
let listener = match UnixListener::bind(socket, &handle) {
Ok(listener) => listener,
Err(err) => {
eprintln!("failed to bind unix socket: {}", err);
return false;
}
};
let app = Rc::clone(&app);
let handle_clone = Rc::clone(&handle);
handle.spawn(listener.incoming()
.map_err(|err| eprintln!("listener error: {}", err))
.for_each(move |(conn, _)| {
let app = Rc::clone(&app);
handle_clone.spawn(future::loop_fn(conn, move |conn| {
let app = Rc::clone(&app);
io::read_exact(conn, [0; 1])
.map_err(|err| {
if err.kind() != ErrorKind::UnexpectedEof {
eprintln!("io error: {}", err);
}
})
.and_then(move |(conn, buf)| {
match buf[0] {
COMMAND_ACTIVATE => app.borrow_mut().active = true,
COMMAND_DEACTIVATE => app.borrow_mut().active = false,
COMMAND_TRIGGER => app.borrow().trigger(),
x => eprintln!("unix socket: invalid command: {}", x)
}
Ok(Loop::Continue(conn))
})
}));
Ok(())
}))
}
#[cfg(feature = "pulse")]
let mut _tx_pulse = None; // Keep sender alive. This must be declared after _pulse so it's dropped before.
#[cfg(feature = "pulse")]
let mut _pulse = None; // Keep pulse alive
#[cfg(feature = "pulse")] {
if not_when_audio {
enum Event {
Clear,
New,
Finish
}
let (tx, rx) = mpsc::unbounded::<Event>();
// Can't do this last because we need the updated pointer
_tx_pulse = Some(tx);
let tx = _tx_pulse.as_mut().unwrap();
let pulse = PulseAudio::default();
extern "C" fn sink_info_callback(
_: *mut pa_context,
info: *const pa_sink_input_info,
_: i32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
if info.is_null() {
(&*tx).unbounded_send(Event::Finish).unwrap();
} else if (*info).corked == 0 {
(&*tx).unbounded_send(Event::New).unwrap();
}
}
}
extern "C" fn subscribe_callback(
ctx: *mut pa_context,
_: pa_subscription_event_type_t,
_: u32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
(&*tx).unbounded_send(Event::Clear).unwrap();
// You *could* keep track of events here (like making change events toggle the on/off status),
// but it's not reliable
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
extern "C" fn state_callback(ctx: *mut pa_context, userdata: *mut c_void) {
unsafe {
let state = pa_context_get_state(ctx);
if state == PA_CONTEXT_READY {
pa_context_set_subscribe_callback(ctx, Some(subscribe_callback), userdata);
pa_context_subscribe(ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, None, ptr::null_mut());
// In case audio already plays
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
}
let mut playing = 0;
let app = Rc::clone(&app);
handle.spawn(rx.for_each(move |event| {
match event {
Event::Clear => playing = 0,
Event::New => playing += 1,
Event::Finish => {
// We've successfully counted all playing inputs
app.borrow_mut().audio = playing != 0;
}
}
Ok(())
}));
let userdata = tx as *mut _ as *mut c_void;
unsafe {
pa_context_set_state_callback(pulse.ctx, Some(state_callback), userdata);
pa_context_connect(pulse.ctx, ptr::null(), 0, ptr::null());
pa_threaded_mainloop_start(pulse.main);
}
// Keep pulse alive
_pulse = Some(pulse);
}
}
let handle_clone = Rc::clone(&handle);
handle.spawn(future::loop_fn((), move |_| {
let mut tx_stop = tx_stop.clone();
let app = Rc::clone(&app);
let delay = app.borrow().delay;
Timeout::new(delay, &handle_clone)
.unwrap()
.map_err(|_| ())
.and_then(move |_| {
let step = app.borrow_mut().step();
if step.is_none() {
return Ok(Loop::Continue(()));
}
tx_stop.take().unwrap().send(()).wait().unwrap();
if step.unwrap() {
Ok(Loop::Break(()))
} else {
Err(())
}
})
}));
let status = core.run(rx_stop.into_future()).is_ok();
if let Some(socket) = socket {
if let Err(err) = fs::remove_file(socket) {
eprintln!("failed to clean up unix socket: {}", err);
}
}
status
}
}
struct App {
active: bool,
audio: bool,
delay: Duration,
display: *mut Display,
info: *mut XScreenSaverInfo,
not_when_fullscreen: bool,
once: bool,
time: u64,
timer: String,
notify: Option<u64>,
notifier: Option<String>,
canceller: Option<String>,
ran_notify: bool,
ran_timer: bool,
fullscreen: Option<bool>,
time_new: u64
}
impl App {
fn step(&mut self) -> Option<bool> {
let active = self.active && !self.audio;
// audio is always false when not-when-audio isn't set, don't worry
let default_delay = Duration::from_secs(SCALE); // TODO: const fn
let idle = if active | else { None };
if active &&
self.notify.map(|notify| (idle.unwrap() + notify) >= self.time).unwrap_or(idle.unwrap() >= self.time) {
let idle = idle.unwrap();
if self.not_when_fullscreen && self.fullscreen.is_none() {
let mut focus = 0u64;
let mut revert = 0i32;
let mut actual_type = 0u64;
let mut actual_format = 0i32;
let mut nitems = 0u64;
let mut bytes = 0u64;
let mut data: *mut u8 = unsafe { mem::uninitialized() };
self.fullscreen = Some(unsafe {
XGetInputFocus(self.display, &mut focus as *mut _, &mut revert as *mut _);
let cstring = CString::from_vec_unchecked("_NET_WM_STATE".into());
if XGetWindowProperty(
self.display,
focus,
XInternAtom(self.display, cstring.as_ptr(), 0),
0,
!0,
0,
XA_ATOM,
&mut actual_type,
&mut actual_format,
&mut nitems,
&mut bytes,
&mut data
) != 0 {
eprintln!("failed to get window property");
false
} else {
// Welcome to hell.
// I spent waay to long trying to get `data` to work.
// Currently it returns 75, because it overflows 331 to fit into a byte.
// Changing `data` to a *mut u64 gives me 210453397504.
// I have no idea why, and at this point I don't want to know.
// So here I'll just compare it to 75 and assume fullscreen.
let mut fullscreen = false;
for i in 0..nitems as isize {
let cstring = CString::from_vec_unchecked("_NET_WM_STATE_FULLSCREEN".into());
if *data.offset(i) == (XInternAtom(self.display, cstring.as_ptr(), 0) & 0xFF) as u8 {
fullscreen = true;
break;
}
}
XFree(data as *mut c_void);
fullscreen
}
});
}
if !self.not_when_fullscreen || !self.fullscreen.unwrap() {
if self.notify.is_some() && !self.ran_notify {
invoke(self.notifier.as_ref().unwrap());
self.ran_notify = true;
self.delay = Duration::from_secs(1);
// Since the delay is usually a minute, I could've exceeded both the notify and the timer.
// The simple solution is to change the timer to a point where it's guaranteed
// it's been _ seconds since the notifier.
self.time_new = idle + self.notify.unwrap();
} else if idle >= self.time_new && !self.ran_timer {
self.trigger();
if self.once {
return Some(true);
}
self.ran_timer = true;
self.delay = default_delay;
}
}
} else {
if self.ran_notify && !self.ran_timer {
// In case the user goes back from being idle between the notify and timer
if let Some(canceller) = self.canceller.as_ref() {
invoke(canceller);
}
}
self.delay = default_delay;
self.ran_notify = false;
self.ran_timer = false;
self.fullscreen = None;
}
None
}
fn trigger(&self) {
invoke(&self.timer);
}
}
fn get_idle(display: *mut Display, info: *mut XScreenSaverInfo) -> Result<u64, ()> {
if unsafe { XScreenSaverQueryInfo(display, XDefaultRootWindow(display), info) } == 0 {
eprintln!("failed to query screen saver info");
return Err(());
}
Ok(unsafe { (*info).idle })
}
fn invoke(cmd: &str) {
if let Err(err) =
Command::new("sh")
.arg("-c")
.arg(cmd)
.status() {
eprintln!("failed to invoke command: {}", err);
}
}
| {
Some(match get_idle(self.display, self.info) {
Ok(idle) => idle / 1000, // Convert to seconds
Err(_) => return Some(false)
})
} | conditional_block |
main.rs | #[cfg(feature = "pulse")] extern crate libpulse_sys;
#[cfg(feature = "tokio")] extern crate ctrlc;
#[cfg(feature = "tokio")] extern crate futures;
#[cfg(feature = "tokio")] extern crate tokio_core;
#[cfg(feature = "tokio")] extern crate tokio_io;
#[cfg(feature = "tokio")] extern crate tokio_uds;
#[macro_use] extern crate clap;
extern crate x11;
#[cfg(feature = "pulse")] mod pulse;
#[cfg(feature = "pulse")] use libpulse_sys::context::*;
#[cfg(feature = "pulse")] use libpulse_sys::context::pa_context;
#[cfg(feature = "pulse")] use libpulse_sys::context::subscribe::pa_subscription_event_type_t;
#[cfg(feature = "pulse")] use libpulse_sys::mainloop::threaded::*;
#[cfg(feature = "pulse")] use pulse::PulseAudio;
#[cfg(feature = "tokio")] use futures::future::{self, Loop};
#[cfg(feature = "tokio")] use futures::sync::mpsc;
#[cfg(feature = "tokio")] use futures::{Future, Sink, Stream};
#[cfg(feature = "tokio")] use std::cell::RefCell;
#[cfg(feature = "tokio")] use std::fs;
#[cfg(feature = "tokio")] use std::io::ErrorKind;
#[cfg(feature = "tokio")] use std::rc::Rc;
#[cfg(feature = "tokio")] use tokio_core::reactor::{Core, Timeout};
#[cfg(feature = "tokio")] use tokio_io::io;
#[cfg(feature = "tokio")] use tokio_uds::UnixListener;
#[cfg(not(feature = "tokio"))] use std::thread;
use clap::{App as ClapApp, Arg};
use std::ffi::CString;
use std::os::raw::c_void;
use std::process::Command;
use std::{mem, ptr};
use std::time::Duration;
use x11::xlib::{Display, XA_ATOM, XCloseDisplay, XDefaultRootWindow, XFree, XGetInputFocus, XGetWindowProperty,
XInternAtom, XOpenDisplay};
use x11::xss::{XScreenSaverAllocInfo, XScreenSaverInfo, XScreenSaverQueryInfo};
struct DeferXClose(*mut Display);
impl Drop for DeferXClose {
fn drop(&mut self) {
unsafe { XCloseDisplay(self.0); }
}
}
struct DeferXFree(*mut c_void);
impl Drop for DeferXFree {
fn drop(&mut self) {
unsafe { XFree(self.0); }
}
}
const SCALE: u64 = 60; // Second:minute scale. Can be changed for debugging purposes.
#[cfg(feature = "tokio")] const COMMAND_DEACTIVATE: u8 = 0;
#[cfg(feature = "tokio")] const COMMAND_ACTIVATE: u8 = 1;
#[cfg(feature = "tokio")] const COMMAND_TRIGGER: u8 = 2;
fn main() {
let success = do_main();
std::process::exit(if success { 0 } else { 1 });
}
fn do_main() -> bool {
let clap_app = ClapApp::new(crate_name!())
.author(crate_authors!())
.version(crate_version!())
// Flags
.arg(
Arg::with_name("print")
.help("Print the idle time to standard output. This is similar to xprintidle.")
.long("print")
)
.arg(
Arg::with_name("not-when-fullscreen")
.help("Don't invoke the timer when the current application is fullscreen. \
Useful for preventing the lockscreen when watching videos")
.long("not-when-fullscreen")
.conflicts_with("print")
)
.arg(
Arg::with_name("once")
.help("Exit after timer command has been invoked once. \
This does not include manual invoking using the socket.")
.long("once")
.conflicts_with("print")
)
// Options
.arg(
Arg::with_name("time")
.help("Set the required amount of idle minutes before invoking timer")
.long("time")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("timer")
.help("Set command to run when the timer goes off")
.long("timer")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("notify")
.help("Run the command passed by --notifier _ seconds before timer goes off")
.long("notify")
.takes_value(true)
.requires("notifier")
.conflicts_with("print")
)
.arg(
Arg::with_name("notifier")
.help("Set the command to run when notifier goes off (see --notify)")
.long("notifier")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
)
.arg(
Arg::with_name("canceller")
.help("Set the command to run when user cancels the timer after the notifier has already gone off")
.long("canceller")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
);
#[cfg(feature = "tokio")]
let mut clap_app = clap_app; // make mutable
#[cfg(feature = "tokio")] {
clap_app = clap_app
.arg(
Arg::with_name("socket")
.help("Listen to events over a unix socket")
.long("socket")
.takes_value(true)
.conflicts_with("print")
);
}
#[cfg(feature = "pulse")] {
clap_app = clap_app
.arg(
Arg::with_name("not-when-audio")
.help("Don't invoke the timer when any audio is playing (PulseAudio specific)")
.long("not-when-audio")
.conflicts_with("print")
);
}
let matches = clap_app.get_matches();
let display = unsafe { XOpenDisplay(ptr::null()) };
if display.is_null() {
eprintln!("failed to open x server");
return false;
}
let _cleanup = DeferXClose(display);
let info = unsafe { XScreenSaverAllocInfo() };
let _cleanup = DeferXFree(info as *mut c_void);
if matches.is_present("print") {
if let Ok(idle) = get_idle(display, info) {
println!("{}", idle);
}
return true;
}
let time = value_t_or_exit!(matches, "time", u32) as u64 * SCALE;
let app = App {
active: true,
audio: false,
delay: Duration::from_secs(SCALE),
display: display,
info: info,
not_when_fullscreen: matches.is_present("not-when-fullscreen"),
once: matches.is_present("once"),
time: time,
timer: matches.value_of("timer").unwrap().to_string(),
notify: value_t!(matches, "notify", u32).ok().map(|notify| notify as u64),
notifier: matches.value_of("notifier").map(String::from),
canceller: matches.value_of("canceller").map(String::from),
ran_notify: false,
ran_timer: false,
fullscreen: None,
time_new: time
};
#[cfg(not(feature = "tokio"))] {
let mut app = app;
loop {
if let Some(exit) = app.step() {
return exit;
} | }
#[cfg(feature = "tokio")] {
#[cfg(feature = "pulse")]
let not_when_audio = matches.is_present("not-when-audio");
let socket = matches.value_of("socket");
let app = Rc::new(RefCell::new(app));
let mut core = Core::new().unwrap();
let handle = Rc::new(core.handle());
let (tx_stop, rx_stop) = mpsc::channel(1);
let tx_stop = Some(tx_stop);
let tx_stop_clone = RefCell::new(tx_stop.clone());
if let Err(err) =
ctrlc::set_handler(move || {
if let Some(tx_stop) = tx_stop_clone.borrow_mut().take() {
tx_stop.send(()).wait().unwrap();
}
}) {
eprintln!("failed to create signal handler: {}", err);
}
if let Some(socket) = socket {
let listener = match UnixListener::bind(socket, &handle) {
Ok(listener) => listener,
Err(err) => {
eprintln!("failed to bind unix socket: {}", err);
return false;
}
};
let app = Rc::clone(&app);
let handle_clone = Rc::clone(&handle);
handle.spawn(listener.incoming()
.map_err(|err| eprintln!("listener error: {}", err))
.for_each(move |(conn, _)| {
let app = Rc::clone(&app);
handle_clone.spawn(future::loop_fn(conn, move |conn| {
let app = Rc::clone(&app);
io::read_exact(conn, [0; 1])
.map_err(|err| {
if err.kind() != ErrorKind::UnexpectedEof {
eprintln!("io error: {}", err);
}
})
.and_then(move |(conn, buf)| {
match buf[0] {
COMMAND_ACTIVATE => app.borrow_mut().active = true,
COMMAND_DEACTIVATE => app.borrow_mut().active = false,
COMMAND_TRIGGER => app.borrow().trigger(),
x => eprintln!("unix socket: invalid command: {}", x)
}
Ok(Loop::Continue(conn))
})
}));
Ok(())
}))
}
#[cfg(feature = "pulse")]
let mut _tx_pulse = None; // Keep sender alive. This must be declared after _pulse so it's dropped before.
#[cfg(feature = "pulse")]
let mut _pulse = None; // Keep pulse alive
#[cfg(feature = "pulse")] {
if not_when_audio {
enum Event {
Clear,
New,
Finish
}
let (tx, rx) = mpsc::unbounded::<Event>();
// Can't do this last because we need the updated pointer
_tx_pulse = Some(tx);
let tx = _tx_pulse.as_mut().unwrap();
let pulse = PulseAudio::default();
extern "C" fn sink_info_callback(
_: *mut pa_context,
info: *const pa_sink_input_info,
_: i32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
if info.is_null() {
(&*tx).unbounded_send(Event::Finish).unwrap();
} else if (*info).corked == 0 {
(&*tx).unbounded_send(Event::New).unwrap();
}
}
}
extern "C" fn subscribe_callback(
ctx: *mut pa_context,
_: pa_subscription_event_type_t,
_: u32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
(&*tx).unbounded_send(Event::Clear).unwrap();
// You *could* keep track of events here (like making change events toggle the on/off status),
// but it's not reliable
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
extern "C" fn state_callback(ctx: *mut pa_context, userdata: *mut c_void) {
unsafe {
let state = pa_context_get_state(ctx);
if state == PA_CONTEXT_READY {
pa_context_set_subscribe_callback(ctx, Some(subscribe_callback), userdata);
pa_context_subscribe(ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, None, ptr::null_mut());
// In case audio already plays
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
}
let mut playing = 0;
let app = Rc::clone(&app);
handle.spawn(rx.for_each(move |event| {
match event {
Event::Clear => playing = 0,
Event::New => playing += 1,
Event::Finish => {
// We've successfully counted all playing inputs
app.borrow_mut().audio = playing != 0;
}
}
Ok(())
}));
let userdata = tx as *mut _ as *mut c_void;
unsafe {
pa_context_set_state_callback(pulse.ctx, Some(state_callback), userdata);
pa_context_connect(pulse.ctx, ptr::null(), 0, ptr::null());
pa_threaded_mainloop_start(pulse.main);
}
// Keep pulse alive
_pulse = Some(pulse);
}
}
let handle_clone = Rc::clone(&handle);
handle.spawn(future::loop_fn((), move |_| {
let mut tx_stop = tx_stop.clone();
let app = Rc::clone(&app);
let delay = app.borrow().delay;
Timeout::new(delay, &handle_clone)
.unwrap()
.map_err(|_| ())
.and_then(move |_| {
let step = app.borrow_mut().step();
if step.is_none() {
return Ok(Loop::Continue(()));
}
tx_stop.take().unwrap().send(()).wait().unwrap();
if step.unwrap() {
Ok(Loop::Break(()))
} else {
Err(())
}
})
}));
let status = core.run(rx_stop.into_future()).is_ok();
if let Some(socket) = socket {
if let Err(err) = fs::remove_file(socket) {
eprintln!("failed to clean up unix socket: {}", err);
}
}
status
}
}
struct App {
active: bool,
audio: bool,
delay: Duration,
display: *mut Display,
info: *mut XScreenSaverInfo,
not_when_fullscreen: bool,
once: bool,
time: u64,
timer: String,
notify: Option<u64>,
notifier: Option<String>,
canceller: Option<String>,
ran_notify: bool,
ran_timer: bool,
fullscreen: Option<bool>,
time_new: u64
}
impl App {
fn step(&mut self) -> Option<bool> {
let active = self.active && !self.audio;
// audio is always false when not-when-audio isn't set, don't worry
let default_delay = Duration::from_secs(SCALE); // TODO: const fn
let idle = if active {
Some(match get_idle(self.display, self.info) {
Ok(idle) => idle / 1000, // Convert to seconds
Err(_) => return Some(false)
})
} else { None };
if active &&
self.notify.map(|notify| (idle.unwrap() + notify) >= self.time).unwrap_or(idle.unwrap() >= self.time) {
let idle = idle.unwrap();
if self.not_when_fullscreen && self.fullscreen.is_none() {
let mut focus = 0u64;
let mut revert = 0i32;
let mut actual_type = 0u64;
let mut actual_format = 0i32;
let mut nitems = 0u64;
let mut bytes = 0u64;
let mut data: *mut u8 = unsafe { mem::uninitialized() };
self.fullscreen = Some(unsafe {
XGetInputFocus(self.display, &mut focus as *mut _, &mut revert as *mut _);
let cstring = CString::from_vec_unchecked("_NET_WM_STATE".into());
if XGetWindowProperty(
self.display,
focus,
XInternAtom(self.display, cstring.as_ptr(), 0),
0,
!0,
0,
XA_ATOM,
&mut actual_type,
&mut actual_format,
&mut nitems,
&mut bytes,
&mut data
) != 0 {
eprintln!("failed to get window property");
false
} else {
// Welcome to hell.
// I spent waay to long trying to get `data` to work.
// Currently it returns 75, because it overflows 331 to fit into a byte.
// Changing `data` to a *mut u64 gives me 210453397504.
// I have no idea why, and at this point I don't want to know.
// So here I'll just compare it to 75 and assume fullscreen.
let mut fullscreen = false;
for i in 0..nitems as isize {
let cstring = CString::from_vec_unchecked("_NET_WM_STATE_FULLSCREEN".into());
if *data.offset(i) == (XInternAtom(self.display, cstring.as_ptr(), 0) & 0xFF) as u8 {
fullscreen = true;
break;
}
}
XFree(data as *mut c_void);
fullscreen
}
});
}
if !self.not_when_fullscreen || !self.fullscreen.unwrap() {
if self.notify.is_some() && !self.ran_notify {
invoke(self.notifier.as_ref().unwrap());
self.ran_notify = true;
self.delay = Duration::from_secs(1);
// Since the delay is usually a minute, I could've exceeded both the notify and the timer.
// The simple solution is to change the timer to a point where it's guaranteed
// it's been _ seconds since the notifier.
self.time_new = idle + self.notify.unwrap();
} else if idle >= self.time_new && !self.ran_timer {
self.trigger();
if self.once {
return Some(true);
}
self.ran_timer = true;
self.delay = default_delay;
}
}
} else {
if self.ran_notify && !self.ran_timer {
// In case the user goes back from being idle between the notify and timer
if let Some(canceller) = self.canceller.as_ref() {
invoke(canceller);
}
}
self.delay = default_delay;
self.ran_notify = false;
self.ran_timer = false;
self.fullscreen = None;
}
None
}
fn trigger(&self) {
invoke(&self.timer);
}
}
fn get_idle(display: *mut Display, info: *mut XScreenSaverInfo) -> Result<u64, ()> {
if unsafe { XScreenSaverQueryInfo(display, XDefaultRootWindow(display), info) } == 0 {
eprintln!("failed to query screen saver info");
return Err(());
}
Ok(unsafe { (*info).idle })
}
fn invoke(cmd: &str) {
if let Err(err) =
Command::new("sh")
.arg("-c")
.arg(cmd)
.status() {
eprintln!("failed to invoke command: {}", err);
}
} |
thread::sleep(app.delay);
} | random_line_split |
main.rs | #[cfg(feature = "pulse")] extern crate libpulse_sys;
#[cfg(feature = "tokio")] extern crate ctrlc;
#[cfg(feature = "tokio")] extern crate futures;
#[cfg(feature = "tokio")] extern crate tokio_core;
#[cfg(feature = "tokio")] extern crate tokio_io;
#[cfg(feature = "tokio")] extern crate tokio_uds;
#[macro_use] extern crate clap;
extern crate x11;
#[cfg(feature = "pulse")] mod pulse;
#[cfg(feature = "pulse")] use libpulse_sys::context::*;
#[cfg(feature = "pulse")] use libpulse_sys::context::pa_context;
#[cfg(feature = "pulse")] use libpulse_sys::context::subscribe::pa_subscription_event_type_t;
#[cfg(feature = "pulse")] use libpulse_sys::mainloop::threaded::*;
#[cfg(feature = "pulse")] use pulse::PulseAudio;
#[cfg(feature = "tokio")] use futures::future::{self, Loop};
#[cfg(feature = "tokio")] use futures::sync::mpsc;
#[cfg(feature = "tokio")] use futures::{Future, Sink, Stream};
#[cfg(feature = "tokio")] use std::cell::RefCell;
#[cfg(feature = "tokio")] use std::fs;
#[cfg(feature = "tokio")] use std::io::ErrorKind;
#[cfg(feature = "tokio")] use std::rc::Rc;
#[cfg(feature = "tokio")] use tokio_core::reactor::{Core, Timeout};
#[cfg(feature = "tokio")] use tokio_io::io;
#[cfg(feature = "tokio")] use tokio_uds::UnixListener;
#[cfg(not(feature = "tokio"))] use std::thread;
use clap::{App as ClapApp, Arg};
use std::ffi::CString;
use std::os::raw::c_void;
use std::process::Command;
use std::{mem, ptr};
use std::time::Duration;
use x11::xlib::{Display, XA_ATOM, XCloseDisplay, XDefaultRootWindow, XFree, XGetInputFocus, XGetWindowProperty,
XInternAtom, XOpenDisplay};
use x11::xss::{XScreenSaverAllocInfo, XScreenSaverInfo, XScreenSaverQueryInfo};
struct DeferXClose(*mut Display);
impl Drop for DeferXClose {
fn drop(&mut self) {
unsafe { XCloseDisplay(self.0); }
}
}
struct DeferXFree(*mut c_void);
impl Drop for DeferXFree {
fn drop(&mut self) {
unsafe { XFree(self.0); }
}
}
const SCALE: u64 = 60; // Second:minute scale. Can be changed for debugging purposes.
#[cfg(feature = "tokio")] const COMMAND_DEACTIVATE: u8 = 0;
#[cfg(feature = "tokio")] const COMMAND_ACTIVATE: u8 = 1;
#[cfg(feature = "tokio")] const COMMAND_TRIGGER: u8 = 2;
fn main() {
let success = do_main();
std::process::exit(if success { 0 } else { 1 });
}
fn do_main() -> bool {
let clap_app = ClapApp::new(crate_name!())
.author(crate_authors!())
.version(crate_version!())
// Flags
.arg(
Arg::with_name("print")
.help("Print the idle time to standard output. This is similar to xprintidle.")
.long("print")
)
.arg(
Arg::with_name("not-when-fullscreen")
.help("Don't invoke the timer when the current application is fullscreen. \
Useful for preventing the lockscreen when watching videos")
.long("not-when-fullscreen")
.conflicts_with("print")
)
.arg(
Arg::with_name("once")
.help("Exit after timer command has been invoked once. \
This does not include manual invoking using the socket.")
.long("once")
.conflicts_with("print")
)
// Options
.arg(
Arg::with_name("time")
.help("Set the required amount of idle minutes before invoking timer")
.long("time")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("timer")
.help("Set command to run when the timer goes off")
.long("timer")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("notify")
.help("Run the command passed by --notifier _ seconds before timer goes off")
.long("notify")
.takes_value(true)
.requires("notifier")
.conflicts_with("print")
)
.arg(
Arg::with_name("notifier")
.help("Set the command to run when notifier goes off (see --notify)")
.long("notifier")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
)
.arg(
Arg::with_name("canceller")
.help("Set the command to run when user cancels the timer after the notifier has already gone off")
.long("canceller")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
);
#[cfg(feature = "tokio")]
let mut clap_app = clap_app; // make mutable
#[cfg(feature = "tokio")] {
clap_app = clap_app
.arg(
Arg::with_name("socket")
.help("Listen to events over a unix socket")
.long("socket")
.takes_value(true)
.conflicts_with("print")
);
}
#[cfg(feature = "pulse")] {
clap_app = clap_app
.arg(
Arg::with_name("not-when-audio")
.help("Don't invoke the timer when any audio is playing (PulseAudio specific)")
.long("not-when-audio")
.conflicts_with("print")
);
}
let matches = clap_app.get_matches();
let display = unsafe { XOpenDisplay(ptr::null()) };
if display.is_null() {
eprintln!("failed to open x server");
return false;
}
let _cleanup = DeferXClose(display);
let info = unsafe { XScreenSaverAllocInfo() };
let _cleanup = DeferXFree(info as *mut c_void);
if matches.is_present("print") {
if let Ok(idle) = get_idle(display, info) {
println!("{}", idle);
}
return true;
}
let time = value_t_or_exit!(matches, "time", u32) as u64 * SCALE;
let app = App {
active: true,
audio: false,
delay: Duration::from_secs(SCALE),
display: display,
info: info,
not_when_fullscreen: matches.is_present("not-when-fullscreen"),
once: matches.is_present("once"),
time: time,
timer: matches.value_of("timer").unwrap().to_string(),
notify: value_t!(matches, "notify", u32).ok().map(|notify| notify as u64),
notifier: matches.value_of("notifier").map(String::from),
canceller: matches.value_of("canceller").map(String::from),
ran_notify: false,
ran_timer: false,
fullscreen: None,
time_new: time
};
#[cfg(not(feature = "tokio"))] {
let mut app = app;
loop {
if let Some(exit) = app.step() {
return exit;
}
thread::sleep(app.delay);
}
}
#[cfg(feature = "tokio")] {
#[cfg(feature = "pulse")]
let not_when_audio = matches.is_present("not-when-audio");
let socket = matches.value_of("socket");
let app = Rc::new(RefCell::new(app));
let mut core = Core::new().unwrap();
let handle = Rc::new(core.handle());
let (tx_stop, rx_stop) = mpsc::channel(1);
let tx_stop = Some(tx_stop);
let tx_stop_clone = RefCell::new(tx_stop.clone());
if let Err(err) =
ctrlc::set_handler(move || {
if let Some(tx_stop) = tx_stop_clone.borrow_mut().take() {
tx_stop.send(()).wait().unwrap();
}
}) {
eprintln!("failed to create signal handler: {}", err);
}
if let Some(socket) = socket {
let listener = match UnixListener::bind(socket, &handle) {
Ok(listener) => listener,
Err(err) => {
eprintln!("failed to bind unix socket: {}", err);
return false;
}
};
let app = Rc::clone(&app);
let handle_clone = Rc::clone(&handle);
handle.spawn(listener.incoming()
.map_err(|err| eprintln!("listener error: {}", err))
.for_each(move |(conn, _)| {
let app = Rc::clone(&app);
handle_clone.spawn(future::loop_fn(conn, move |conn| {
let app = Rc::clone(&app);
io::read_exact(conn, [0; 1])
.map_err(|err| {
if err.kind() != ErrorKind::UnexpectedEof {
eprintln!("io error: {}", err);
}
})
.and_then(move |(conn, buf)| {
match buf[0] {
COMMAND_ACTIVATE => app.borrow_mut().active = true,
COMMAND_DEACTIVATE => app.borrow_mut().active = false,
COMMAND_TRIGGER => app.borrow().trigger(),
x => eprintln!("unix socket: invalid command: {}", x)
}
Ok(Loop::Continue(conn))
})
}));
Ok(())
}))
}
#[cfg(feature = "pulse")]
let mut _tx_pulse = None; // Keep sender alive. This must be declared after _pulse so it's dropped before.
#[cfg(feature = "pulse")]
let mut _pulse = None; // Keep pulse alive
#[cfg(feature = "pulse")] {
if not_when_audio {
enum Event {
Clear,
New,
Finish
}
let (tx, rx) = mpsc::unbounded::<Event>();
// Can't do this last because we need the updated pointer
_tx_pulse = Some(tx);
let tx = _tx_pulse.as_mut().unwrap();
let pulse = PulseAudio::default();
extern "C" fn sink_info_callback(
_: *mut pa_context,
info: *const pa_sink_input_info,
_: i32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
if info.is_null() {
(&*tx).unbounded_send(Event::Finish).unwrap();
} else if (*info).corked == 0 {
(&*tx).unbounded_send(Event::New).unwrap();
}
}
}
extern "C" fn subscribe_callback(
ctx: *mut pa_context,
_: pa_subscription_event_type_t,
_: u32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
(&*tx).unbounded_send(Event::Clear).unwrap();
// You *could* keep track of events here (like making change events toggle the on/off status),
// but it's not reliable
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
extern "C" fn state_callback(ctx: *mut pa_context, userdata: *mut c_void) |
let mut playing = 0;
let app = Rc::clone(&app);
handle.spawn(rx.for_each(move |event| {
match event {
Event::Clear => playing = 0,
Event::New => playing += 1,
Event::Finish => {
// We've successfully counted all playing inputs
app.borrow_mut().audio = playing != 0;
}
}
Ok(())
}));
let userdata = tx as *mut _ as *mut c_void;
unsafe {
pa_context_set_state_callback(pulse.ctx, Some(state_callback), userdata);
pa_context_connect(pulse.ctx, ptr::null(), 0, ptr::null());
pa_threaded_mainloop_start(pulse.main);
}
// Keep pulse alive
_pulse = Some(pulse);
}
}
let handle_clone = Rc::clone(&handle);
handle.spawn(future::loop_fn((), move |_| {
let mut tx_stop = tx_stop.clone();
let app = Rc::clone(&app);
let delay = app.borrow().delay;
Timeout::new(delay, &handle_clone)
.unwrap()
.map_err(|_| ())
.and_then(move |_| {
let step = app.borrow_mut().step();
if step.is_none() {
return Ok(Loop::Continue(()));
}
tx_stop.take().unwrap().send(()).wait().unwrap();
if step.unwrap() {
Ok(Loop::Break(()))
} else {
Err(())
}
})
}));
let status = core.run(rx_stop.into_future()).is_ok();
if let Some(socket) = socket {
if let Err(err) = fs::remove_file(socket) {
eprintln!("failed to clean up unix socket: {}", err);
}
}
status
}
}
struct App {
active: bool,
audio: bool,
delay: Duration,
display: *mut Display,
info: *mut XScreenSaverInfo,
not_when_fullscreen: bool,
once: bool,
time: u64,
timer: String,
notify: Option<u64>,
notifier: Option<String>,
canceller: Option<String>,
ran_notify: bool,
ran_timer: bool,
fullscreen: Option<bool>,
time_new: u64
}
impl App {
fn step(&mut self) -> Option<bool> {
let active = self.active && !self.audio;
// audio is always false when not-when-audio isn't set, don't worry
let default_delay = Duration::from_secs(SCALE); // TODO: const fn
let idle = if active {
Some(match get_idle(self.display, self.info) {
Ok(idle) => idle / 1000, // Convert to seconds
Err(_) => return Some(false)
})
} else { None };
if active &&
self.notify.map(|notify| (idle.unwrap() + notify) >= self.time).unwrap_or(idle.unwrap() >= self.time) {
let idle = idle.unwrap();
if self.not_when_fullscreen && self.fullscreen.is_none() {
let mut focus = 0u64;
let mut revert = 0i32;
let mut actual_type = 0u64;
let mut actual_format = 0i32;
let mut nitems = 0u64;
let mut bytes = 0u64;
let mut data: *mut u8 = unsafe { mem::uninitialized() };
self.fullscreen = Some(unsafe {
XGetInputFocus(self.display, &mut focus as *mut _, &mut revert as *mut _);
let cstring = CString::from_vec_unchecked("_NET_WM_STATE".into());
if XGetWindowProperty(
self.display,
focus,
XInternAtom(self.display, cstring.as_ptr(), 0),
0,
!0,
0,
XA_ATOM,
&mut actual_type,
&mut actual_format,
&mut nitems,
&mut bytes,
&mut data
) != 0 {
eprintln!("failed to get window property");
false
} else {
// Welcome to hell.
// I spent waay to long trying to get `data` to work.
// Currently it returns 75, because it overflows 331 to fit into a byte.
// Changing `data` to a *mut u64 gives me 210453397504.
// I have no idea why, and at this point I don't want to know.
// So here I'll just compare it to 75 and assume fullscreen.
let mut fullscreen = false;
for i in 0..nitems as isize {
let cstring = CString::from_vec_unchecked("_NET_WM_STATE_FULLSCREEN".into());
if *data.offset(i) == (XInternAtom(self.display, cstring.as_ptr(), 0) & 0xFF) as u8 {
fullscreen = true;
break;
}
}
XFree(data as *mut c_void);
fullscreen
}
});
}
if !self.not_when_fullscreen || !self.fullscreen.unwrap() {
if self.notify.is_some() && !self.ran_notify {
invoke(self.notifier.as_ref().unwrap());
self.ran_notify = true;
self.delay = Duration::from_secs(1);
// Since the delay is usually a minute, I could've exceeded both the notify and the timer.
// The simple solution is to change the timer to a point where it's guaranteed
// it's been _ seconds since the notifier.
self.time_new = idle + self.notify.unwrap();
} else if idle >= self.time_new && !self.ran_timer {
self.trigger();
if self.once {
return Some(true);
}
self.ran_timer = true;
self.delay = default_delay;
}
}
} else {
if self.ran_notify && !self.ran_timer {
// In case the user goes back from being idle between the notify and timer
if let Some(canceller) = self.canceller.as_ref() {
invoke(canceller);
}
}
self.delay = default_delay;
self.ran_notify = false;
self.ran_timer = false;
self.fullscreen = None;
}
None
}
fn trigger(&self) {
invoke(&self.timer);
}
}
fn get_idle(display: *mut Display, info: *mut XScreenSaverInfo) -> Result<u64, ()> {
if unsafe { XScreenSaverQueryInfo(display, XDefaultRootWindow(display), info) } == 0 {
eprintln!("failed to query screen saver info");
return Err(());
}
Ok(unsafe { (*info).idle })
}
fn invoke(cmd: &str) {
if let Err(err) =
Command::new("sh")
.arg("-c")
.arg(cmd)
.status() {
eprintln!("failed to invoke command: {}", err);
}
}
| {
unsafe {
let state = pa_context_get_state(ctx);
if state == PA_CONTEXT_READY {
pa_context_set_subscribe_callback(ctx, Some(subscribe_callback), userdata);
pa_context_subscribe(ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, None, ptr::null_mut());
// In case audio already plays
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
} | identifier_body |
main.rs | #[cfg(feature = "pulse")] extern crate libpulse_sys;
#[cfg(feature = "tokio")] extern crate ctrlc;
#[cfg(feature = "tokio")] extern crate futures;
#[cfg(feature = "tokio")] extern crate tokio_core;
#[cfg(feature = "tokio")] extern crate tokio_io;
#[cfg(feature = "tokio")] extern crate tokio_uds;
#[macro_use] extern crate clap;
extern crate x11;
#[cfg(feature = "pulse")] mod pulse;
#[cfg(feature = "pulse")] use libpulse_sys::context::*;
#[cfg(feature = "pulse")] use libpulse_sys::context::pa_context;
#[cfg(feature = "pulse")] use libpulse_sys::context::subscribe::pa_subscription_event_type_t;
#[cfg(feature = "pulse")] use libpulse_sys::mainloop::threaded::*;
#[cfg(feature = "pulse")] use pulse::PulseAudio;
#[cfg(feature = "tokio")] use futures::future::{self, Loop};
#[cfg(feature = "tokio")] use futures::sync::mpsc;
#[cfg(feature = "tokio")] use futures::{Future, Sink, Stream};
#[cfg(feature = "tokio")] use std::cell::RefCell;
#[cfg(feature = "tokio")] use std::fs;
#[cfg(feature = "tokio")] use std::io::ErrorKind;
#[cfg(feature = "tokio")] use std::rc::Rc;
#[cfg(feature = "tokio")] use tokio_core::reactor::{Core, Timeout};
#[cfg(feature = "tokio")] use tokio_io::io;
#[cfg(feature = "tokio")] use tokio_uds::UnixListener;
#[cfg(not(feature = "tokio"))] use std::thread;
use clap::{App as ClapApp, Arg};
use std::ffi::CString;
use std::os::raw::c_void;
use std::process::Command;
use std::{mem, ptr};
use std::time::Duration;
use x11::xlib::{Display, XA_ATOM, XCloseDisplay, XDefaultRootWindow, XFree, XGetInputFocus, XGetWindowProperty,
XInternAtom, XOpenDisplay};
use x11::xss::{XScreenSaverAllocInfo, XScreenSaverInfo, XScreenSaverQueryInfo};
struct DeferXClose(*mut Display);
impl Drop for DeferXClose {
fn drop(&mut self) {
unsafe { XCloseDisplay(self.0); }
}
}
struct DeferXFree(*mut c_void);
impl Drop for DeferXFree {
fn drop(&mut self) {
unsafe { XFree(self.0); }
}
}
const SCALE: u64 = 60; // Second:minute scale. Can be changed for debugging purposes.
#[cfg(feature = "tokio")] const COMMAND_DEACTIVATE: u8 = 0;
#[cfg(feature = "tokio")] const COMMAND_ACTIVATE: u8 = 1;
#[cfg(feature = "tokio")] const COMMAND_TRIGGER: u8 = 2;
fn main() {
let success = do_main();
std::process::exit(if success { 0 } else { 1 });
}
fn do_main() -> bool {
let clap_app = ClapApp::new(crate_name!())
.author(crate_authors!())
.version(crate_version!())
// Flags
.arg(
Arg::with_name("print")
.help("Print the idle time to standard output. This is similar to xprintidle.")
.long("print")
)
.arg(
Arg::with_name("not-when-fullscreen")
.help("Don't invoke the timer when the current application is fullscreen. \
Useful for preventing the lockscreen when watching videos")
.long("not-when-fullscreen")
.conflicts_with("print")
)
.arg(
Arg::with_name("once")
.help("Exit after timer command has been invoked once. \
This does not include manual invoking using the socket.")
.long("once")
.conflicts_with("print")
)
// Options
.arg(
Arg::with_name("time")
.help("Set the required amount of idle minutes before invoking timer")
.long("time")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("timer")
.help("Set command to run when the timer goes off")
.long("timer")
.takes_value(true)
.required_unless("print")
.conflicts_with("print")
)
.arg(
Arg::with_name("notify")
.help("Run the command passed by --notifier _ seconds before timer goes off")
.long("notify")
.takes_value(true)
.requires("notifier")
.conflicts_with("print")
)
.arg(
Arg::with_name("notifier")
.help("Set the command to run when notifier goes off (see --notify)")
.long("notifier")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
)
.arg(
Arg::with_name("canceller")
.help("Set the command to run when user cancels the timer after the notifier has already gone off")
.long("canceller")
.takes_value(true)
.requires("notify")
.conflicts_with("print")
);
#[cfg(feature = "tokio")]
let mut clap_app = clap_app; // make mutable
#[cfg(feature = "tokio")] {
clap_app = clap_app
.arg(
Arg::with_name("socket")
.help("Listen to events over a unix socket")
.long("socket")
.takes_value(true)
.conflicts_with("print")
);
}
#[cfg(feature = "pulse")] {
clap_app = clap_app
.arg(
Arg::with_name("not-when-audio")
.help("Don't invoke the timer when any audio is playing (PulseAudio specific)")
.long("not-when-audio")
.conflicts_with("print")
);
}
let matches = clap_app.get_matches();
let display = unsafe { XOpenDisplay(ptr::null()) };
if display.is_null() {
eprintln!("failed to open x server");
return false;
}
let _cleanup = DeferXClose(display);
let info = unsafe { XScreenSaverAllocInfo() };
let _cleanup = DeferXFree(info as *mut c_void);
if matches.is_present("print") {
if let Ok(idle) = get_idle(display, info) {
println!("{}", idle);
}
return true;
}
let time = value_t_or_exit!(matches, "time", u32) as u64 * SCALE;
let app = App {
active: true,
audio: false,
delay: Duration::from_secs(SCALE),
display: display,
info: info,
not_when_fullscreen: matches.is_present("not-when-fullscreen"),
once: matches.is_present("once"),
time: time,
timer: matches.value_of("timer").unwrap().to_string(),
notify: value_t!(matches, "notify", u32).ok().map(|notify| notify as u64),
notifier: matches.value_of("notifier").map(String::from),
canceller: matches.value_of("canceller").map(String::from),
ran_notify: false,
ran_timer: false,
fullscreen: None,
time_new: time
};
#[cfg(not(feature = "tokio"))] {
let mut app = app;
loop {
if let Some(exit) = app.step() {
return exit;
}
thread::sleep(app.delay);
}
}
#[cfg(feature = "tokio")] {
#[cfg(feature = "pulse")]
let not_when_audio = matches.is_present("not-when-audio");
let socket = matches.value_of("socket");
let app = Rc::new(RefCell::new(app));
let mut core = Core::new().unwrap();
let handle = Rc::new(core.handle());
let (tx_stop, rx_stop) = mpsc::channel(1);
let tx_stop = Some(tx_stop);
let tx_stop_clone = RefCell::new(tx_stop.clone());
if let Err(err) =
ctrlc::set_handler(move || {
if let Some(tx_stop) = tx_stop_clone.borrow_mut().take() {
tx_stop.send(()).wait().unwrap();
}
}) {
eprintln!("failed to create signal handler: {}", err);
}
if let Some(socket) = socket {
let listener = match UnixListener::bind(socket, &handle) {
Ok(listener) => listener,
Err(err) => {
eprintln!("failed to bind unix socket: {}", err);
return false;
}
};
let app = Rc::clone(&app);
let handle_clone = Rc::clone(&handle);
handle.spawn(listener.incoming()
.map_err(|err| eprintln!("listener error: {}", err))
.for_each(move |(conn, _)| {
let app = Rc::clone(&app);
handle_clone.spawn(future::loop_fn(conn, move |conn| {
let app = Rc::clone(&app);
io::read_exact(conn, [0; 1])
.map_err(|err| {
if err.kind() != ErrorKind::UnexpectedEof {
eprintln!("io error: {}", err);
}
})
.and_then(move |(conn, buf)| {
match buf[0] {
COMMAND_ACTIVATE => app.borrow_mut().active = true,
COMMAND_DEACTIVATE => app.borrow_mut().active = false,
COMMAND_TRIGGER => app.borrow().trigger(),
x => eprintln!("unix socket: invalid command: {}", x)
}
Ok(Loop::Continue(conn))
})
}));
Ok(())
}))
}
#[cfg(feature = "pulse")]
let mut _tx_pulse = None; // Keep sender alive. This must be declared after _pulse so it's dropped before.
#[cfg(feature = "pulse")]
let mut _pulse = None; // Keep pulse alive
#[cfg(feature = "pulse")] {
if not_when_audio {
enum Event {
Clear,
New,
Finish
}
let (tx, rx) = mpsc::unbounded::<Event>();
// Can't do this last because we need the updated pointer
_tx_pulse = Some(tx);
let tx = _tx_pulse.as_mut().unwrap();
let pulse = PulseAudio::default();
extern "C" fn sink_info_callback(
_: *mut pa_context,
info: *const pa_sink_input_info,
_: i32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
if info.is_null() {
(&*tx).unbounded_send(Event::Finish).unwrap();
} else if (*info).corked == 0 {
(&*tx).unbounded_send(Event::New).unwrap();
}
}
}
extern "C" fn subscribe_callback(
ctx: *mut pa_context,
_: pa_subscription_event_type_t,
_: u32,
userdata: *mut c_void
) {
unsafe {
let tx = userdata as *mut _ as *mut mpsc::UnboundedSender<Event>;
(&*tx).unbounded_send(Event::Clear).unwrap();
// You *could* keep track of events here (like making change events toggle the on/off status),
// but it's not reliable
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
extern "C" fn state_callback(ctx: *mut pa_context, userdata: *mut c_void) {
unsafe {
let state = pa_context_get_state(ctx);
if state == PA_CONTEXT_READY {
pa_context_set_subscribe_callback(ctx, Some(subscribe_callback), userdata);
pa_context_subscribe(ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, None, ptr::null_mut());
// In case audio already plays
pa_context_get_sink_input_info_list(ctx, Some(sink_info_callback), userdata);
}
}
}
let mut playing = 0;
let app = Rc::clone(&app);
handle.spawn(rx.for_each(move |event| {
match event {
Event::Clear => playing = 0,
Event::New => playing += 1,
Event::Finish => {
// We've successfully counted all playing inputs
app.borrow_mut().audio = playing != 0;
}
}
Ok(())
}));
let userdata = tx as *mut _ as *mut c_void;
unsafe {
pa_context_set_state_callback(pulse.ctx, Some(state_callback), userdata);
pa_context_connect(pulse.ctx, ptr::null(), 0, ptr::null());
pa_threaded_mainloop_start(pulse.main);
}
// Keep pulse alive
_pulse = Some(pulse);
}
}
let handle_clone = Rc::clone(&handle);
handle.spawn(future::loop_fn((), move |_| {
let mut tx_stop = tx_stop.clone();
let app = Rc::clone(&app);
let delay = app.borrow().delay;
Timeout::new(delay, &handle_clone)
.unwrap()
.map_err(|_| ())
.and_then(move |_| {
let step = app.borrow_mut().step();
if step.is_none() {
return Ok(Loop::Continue(()));
}
tx_stop.take().unwrap().send(()).wait().unwrap();
if step.unwrap() {
Ok(Loop::Break(()))
} else {
Err(())
}
})
}));
let status = core.run(rx_stop.into_future()).is_ok();
if let Some(socket) = socket {
if let Err(err) = fs::remove_file(socket) {
eprintln!("failed to clean up unix socket: {}", err);
}
}
status
}
}
struct App {
active: bool,
audio: bool,
delay: Duration,
display: *mut Display,
info: *mut XScreenSaverInfo,
not_when_fullscreen: bool,
once: bool,
time: u64,
timer: String,
notify: Option<u64>,
notifier: Option<String>,
canceller: Option<String>,
ran_notify: bool,
ran_timer: bool,
fullscreen: Option<bool>,
time_new: u64
}
impl App {
fn step(&mut self) -> Option<bool> {
let active = self.active && !self.audio;
// audio is always false when not-when-audio isn't set, don't worry
let default_delay = Duration::from_secs(SCALE); // TODO: const fn
let idle = if active {
Some(match get_idle(self.display, self.info) {
Ok(idle) => idle / 1000, // Convert to seconds
Err(_) => return Some(false)
})
} else { None };
if active &&
self.notify.map(|notify| (idle.unwrap() + notify) >= self.time).unwrap_or(idle.unwrap() >= self.time) {
let idle = idle.unwrap();
if self.not_when_fullscreen && self.fullscreen.is_none() {
let mut focus = 0u64;
let mut revert = 0i32;
let mut actual_type = 0u64;
let mut actual_format = 0i32;
let mut nitems = 0u64;
let mut bytes = 0u64;
let mut data: *mut u8 = unsafe { mem::uninitialized() };
self.fullscreen = Some(unsafe {
XGetInputFocus(self.display, &mut focus as *mut _, &mut revert as *mut _);
let cstring = CString::from_vec_unchecked("_NET_WM_STATE".into());
if XGetWindowProperty(
self.display,
focus,
XInternAtom(self.display, cstring.as_ptr(), 0),
0,
!0,
0,
XA_ATOM,
&mut actual_type,
&mut actual_format,
&mut nitems,
&mut bytes,
&mut data
) != 0 {
eprintln!("failed to get window property");
false
} else {
// Welcome to hell.
// I spent waay to long trying to get `data` to work.
// Currently it returns 75, because it overflows 331 to fit into a byte.
// Changing `data` to a *mut u64 gives me 210453397504.
// I have no idea why, and at this point I don't want to know.
// So here I'll just compare it to 75 and assume fullscreen.
let mut fullscreen = false;
for i in 0..nitems as isize {
let cstring = CString::from_vec_unchecked("_NET_WM_STATE_FULLSCREEN".into());
if *data.offset(i) == (XInternAtom(self.display, cstring.as_ptr(), 0) & 0xFF) as u8 {
fullscreen = true;
break;
}
}
XFree(data as *mut c_void);
fullscreen
}
});
}
if !self.not_when_fullscreen || !self.fullscreen.unwrap() {
if self.notify.is_some() && !self.ran_notify {
invoke(self.notifier.as_ref().unwrap());
self.ran_notify = true;
self.delay = Duration::from_secs(1);
// Since the delay is usually a minute, I could've exceeded both the notify and the timer.
// The simple solution is to change the timer to a point where it's guaranteed
// it's been _ seconds since the notifier.
self.time_new = idle + self.notify.unwrap();
} else if idle >= self.time_new && !self.ran_timer {
self.trigger();
if self.once {
return Some(true);
}
self.ran_timer = true;
self.delay = default_delay;
}
}
} else {
if self.ran_notify && !self.ran_timer {
// In case the user goes back from being idle between the notify and timer
if let Some(canceller) = self.canceller.as_ref() {
invoke(canceller);
}
}
self.delay = default_delay;
self.ran_notify = false;
self.ran_timer = false;
self.fullscreen = None;
}
None
}
fn trigger(&self) {
invoke(&self.timer);
}
}
fn | (display: *mut Display, info: *mut XScreenSaverInfo) -> Result<u64, ()> {
if unsafe { XScreenSaverQueryInfo(display, XDefaultRootWindow(display), info) } == 0 {
eprintln!("failed to query screen saver info");
return Err(());
}
Ok(unsafe { (*info).idle })
}
fn invoke(cmd: &str) {
if let Err(err) =
Command::new("sh")
.arg("-c")
.arg(cmd)
.status() {
eprintln!("failed to invoke command: {}", err);
}
}
| get_idle | identifier_name |
install.go | package cmd
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"path"
"path/filepath"
"regexp"
"strings"
"time"
"docker.io/go-docker"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/filters"
"github.com/appcelerator/amp/docker/cli/cli/command"
"github.com/appcelerator/amp/docker/cli/cli/command/stack"
"github.com/appcelerator/amp/docker/cli/opts"
"github.com/appcelerator/amp/docker/docker/pkg/term"
ampdocker "github.com/appcelerator/amp/pkg/docker"
"github.com/spf13/cobra"
)
const (
TARGET_SINGLE = "single"
TARGET_CLUSTER = "cluster"
)
type InstallOptions struct {
NoLogs bool
NoMetrics bool
NoProxy bool
}
var InstallOpts = &InstallOptions{}
var Docker = ampdocker.NewClient(ampdocker.DefaultURL, ampdocker.DefaultVersion)
func NewInstallCommand() *cobra.Command {
installCmd := &cobra.Command{
Use: "install",
Short: "Set up amp services in swarm environment",
RunE: Install,
}
return installCmd
}
func Install(cmd *cobra.Command, args []string) error {
stdin, stdout, stderr := term.StdStreams()
dockerCli := ampdocker.NewDockerCli(stdin, stdout, stderr)
if err := Docker.Connect(); err != nil {
return err
}
// Create initial secrets
createInitialSecrets()
// Create initial configs
createInitialConfigs()
// Create initial networks
createInitialNetworks()
namespace := "amp"
if len(args) > 0 && args[0] != "" {
namespace = args[0]
}
// Handle interrupt signal
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
log.Println("\nReceived an interrupt signal - removing AMP services")
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
os.Exit(1)
}
}()
deploymentMode, err := serviceDeploymentMode(dockerCli.Client(), "amp.type.kv", "true")
if err != nil {
return err
}
stackFiles, err := getStackFiles("./stacks", deploymentMode)
if err != nil {
return err
}
for _, stackFile := range stackFiles {
if strings.Contains(stackFile, "logs") && InstallOpts.NoLogs ||
strings.Contains(stackFile, "metrics") && InstallOpts.NoMetrics ||
strings.Contains(stackFile, "proxy") && InstallOpts.NoProxy {
continue
}
log.Println("Deploying stack", stackFile)
if err := deploy(dockerCli, stackFile, namespace); err != nil {
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
return err
}
}
return nil
}
// returns the deployment mode
// based on the number of nodes with the label passed as argument
// if number of nodes > 2, mode = cluster, else mode = single
func serviceDeploymentMode(c docker.APIClient, labelKey string, labelValue string) (string, error) {
// unfortunately filtering labels on NodeList won't work as expected, Cf. https://github.com/moby/moby/issues/27231
nodes, err := c.NodeList(context.Background(), types.NodeListOptions{})
if err != nil {
return "", err
}
matchingNodes := 0
for _, node := range nodes {
// node is a swarm.Node
for k, v := range node.Spec.Labels {
if k == labelKey {
if labelValue == "" || labelValue == v {
matchingNodes++
}
}
}
}
switch matchingNodes {
case 0:
return "", fmt.Errorf("can't find a node with label %s", labelKey)
case 1:
fallthrough
case 2:
return TARGET_SINGLE, nil
default:
return TARGET_CLUSTER, nil
}
}
// returns sorted list of yaml file pathnames
func getStackFiles(path string, deploymentMode string) ([]string, error) {
if path == "" {
path = "./stacks"
}
path += "/" + deploymentMode
// a bit more work but we can't just use filepath.Glob
// since we need to match both *.yml and *.yaml
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
stackfiles := []string{}
for _, f := range files {
name := f.Name()
if matched, _ := regexp.MatchString("\\.ya?ml$", name); matched {
stackfiles = append(stackfiles, filepath.Join(path, name))
}
}
return stackfiles, nil
}
func deploy(d *command.DockerCli, stackfile string, namespace string) error {
if namespace == "" {
// use the stackfile basename as the default stack namespace
namespace = filepath.Base(stackfile)
namespace = strings.TrimSuffix(namespace, filepath.Ext(namespace))
}
options := stack.DeployOptions{
Namespace: namespace,
Composefile: stackfile,
ResolveImage: stack.ResolveImageAlways,
SendRegistryAuth: false,
Prune: false,
}
if err := stack.RunDeploy(d, options); err != nil {
return err
}
for _, err := range Docker.WaitOnStack(context.Background(), namespace, os.Stdout) {
if err != nil {
return err
}
}
return nil
}
// AMP configs map: Config name paired to config file in ./defaults
var ampConfigs = map[string]string{
"prometheus_alerts_rules": "prometheus_alerts.rules",
}
// This is the default configs path
const defaultConfigsPath = "defaults"
func createInitialConfigs() error {
// Computing config path
configPath := path.Join("/", defaultConfigsPath)
pe, err := pathExists(configPath)
if err != nil {
return err
}
if !pe {
configPath = defaultConfigsPath
}
configPath, err = filepath.Abs(configPath)
if err != nil {
return err
}
log.Println("Using the following path for configs:", configPath)
// Creating configs
for config, filename := range ampConfigs {
// Check if config already exists
exists, err := Docker.ConfigExists(config)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing config:", config)
continue
}
// Load config data
data, err := ioutil.ReadFile(path.Join(configPath, filename))
if err != nil {
return err
}
// Create config
if _, err := Docker.CreateConfig(config, data); err != nil {
return err
}
log.Println("Successfully created config:", config)
}
return nil
}
// AMP secrets map: Secret name paired to secret file in ./defaults
var ampSecrets = map[string]string{
"alertmanager_yml": "alertmanager.yml",
"amplifier_yml": "amplifier.yml",
"certificate_amp": "certificate.amp",
}
// This is the default secrets path
const defaultSecretsPath = "defaults"
// exists returns whether the given file or directory exists or not
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil { | if os.IsNotExist(err) {
return false, nil
}
return true, err
}
func createInitialSecrets() error {
// Computing secret path
secretPath := path.Join("/", defaultSecretsPath)
pe, err := pathExists(secretPath)
if err != nil {
return err
}
if !pe {
secretPath = defaultSecretsPath
}
secretPath, err = filepath.Abs(secretPath)
if err != nil {
return err
}
log.Println("Using the following path for secrets:", secretPath)
// Creating secrets
for secret, filename := range ampSecrets {
// Check if secret already exists
exists, err := Docker.SecretExists(secret)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing secret:", secret)
continue
}
// Load secret data
data, err := ioutil.ReadFile(path.Join(secretPath, filename))
if err != nil {
return err
}
// Create secret
if _, err := Docker.CreateSecret(secret, data); err != nil {
return err
}
log.Println("Successfully created secret:", secret)
}
return nil
}
var ampnetworks = []string{"public", "monit", "core"}
func createInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
exists, err := Docker.NetworkExists(network)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing network:", network)
continue
}
if _, err := Docker.CreateNetwork(network, true, true); err != nil {
return err
}
log.Println("Successfully created network:", network)
}
return nil
}
func removeInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
id, err := Docker.NetworkID(network)
if err != nil {
return err
}
if id == "" {
continue // Skipping non existent network
}
// Remove network
if err := Docker.RemoveNetwork(id); err != nil {
return err
}
log.Printf("Successfully removed network %s [%s]", network, id)
}
return nil
}
func removeExitedContainers(timeout int) error {
i := 0
dontKill := []string{"amp-agent", "amp-local"}
var containers []types.Container
if timeout == 0 {
timeout = 30 // default value
}
log.Println("waiting for all services to clear up...")
filter := filters.NewArgs()
filter.Add("is-task", "true")
filter.Add("label", "io.amp.role=infrastructure")
for i < timeout {
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if len(containers) == 0 {
log.Println("cleared up")
break
}
for _, c := range containers {
switch c.State {
case "exited":
log.Printf("Removing container %s [%s]\n", c.Names[0], c.Status)
err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{})
if err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
case "removing", "running":
// ignore it, _running_ containers will be killed after the loop
// _removing_ containers are in progress of deletion
default:
// this is not expected
log.Printf("Container %s found in status %s, %s\n", c.Names[0], c.Status, c.State)
}
}
i++
time.Sleep(1 * time.Second)
}
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if i == timeout {
log.Println("timing out")
log.Printf("%d containers left\n", len(containers))
}
//
for _, c := range containers {
for _, e := range dontKill {
if strings.Contains(c.Names[0], e) {
continue
}
}
log.Printf("Force removing container %s [%s]", c.Names[0], c.State)
if err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{Force: true}); err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
}
return nil
}
const ampVolumesPrefix = "amp_"
func removeVolumes(timeout int) error {
// volume remove timeout (sec)
if timeout == 0 {
timeout = 5 // default value
}
// List amp volumes
filter := opts.NewFilterOpt()
filter.Set("name=" + ampVolumesPrefix)
volumes, err := Docker.ListVolumes(filter)
if err != nil {
return nil
}
// Remove volumes
for _, volume := range volumes {
log.Printf("Removing volume [%s]... ", volume.Name)
if err := Docker.RemoveVolume(volume.Name, false, timeout); err != nil {
log.Println("Failed")
return err
}
}
return nil
} | return true, nil
} | random_line_split |
install.go | package cmd
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"path"
"path/filepath"
"regexp"
"strings"
"time"
"docker.io/go-docker"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/filters"
"github.com/appcelerator/amp/docker/cli/cli/command"
"github.com/appcelerator/amp/docker/cli/cli/command/stack"
"github.com/appcelerator/amp/docker/cli/opts"
"github.com/appcelerator/amp/docker/docker/pkg/term"
ampdocker "github.com/appcelerator/amp/pkg/docker"
"github.com/spf13/cobra"
)
const (
TARGET_SINGLE = "single"
TARGET_CLUSTER = "cluster"
)
type InstallOptions struct {
NoLogs bool
NoMetrics bool
NoProxy bool
}
var InstallOpts = &InstallOptions{}
var Docker = ampdocker.NewClient(ampdocker.DefaultURL, ampdocker.DefaultVersion)
func NewInstallCommand() *cobra.Command {
installCmd := &cobra.Command{
Use: "install",
Short: "Set up amp services in swarm environment",
RunE: Install,
}
return installCmd
}
func Install(cmd *cobra.Command, args []string) error {
stdin, stdout, stderr := term.StdStreams()
dockerCli := ampdocker.NewDockerCli(stdin, stdout, stderr)
if err := Docker.Connect(); err != nil {
return err
}
// Create initial secrets
createInitialSecrets()
// Create initial configs
createInitialConfigs()
// Create initial networks
createInitialNetworks()
namespace := "amp"
if len(args) > 0 && args[0] != "" {
namespace = args[0]
}
// Handle interrupt signal
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
log.Println("\nReceived an interrupt signal - removing AMP services")
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
os.Exit(1)
}
}()
deploymentMode, err := serviceDeploymentMode(dockerCli.Client(), "amp.type.kv", "true")
if err != nil {
return err
}
stackFiles, err := getStackFiles("./stacks", deploymentMode)
if err != nil {
return err
}
for _, stackFile := range stackFiles {
if strings.Contains(stackFile, "logs") && InstallOpts.NoLogs ||
strings.Contains(stackFile, "metrics") && InstallOpts.NoMetrics ||
strings.Contains(stackFile, "proxy") && InstallOpts.NoProxy {
continue
}
log.Println("Deploying stack", stackFile)
if err := deploy(dockerCli, stackFile, namespace); err != nil {
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
return err
}
}
return nil
}
// returns the deployment mode
// based on the number of nodes with the label passed as argument
// if number of nodes > 2, mode = cluster, else mode = single
func serviceDeploymentMode(c docker.APIClient, labelKey string, labelValue string) (string, error) {
// unfortunately filtering labels on NodeList won't work as expected, Cf. https://github.com/moby/moby/issues/27231
nodes, err := c.NodeList(context.Background(), types.NodeListOptions{})
if err != nil {
return "", err
}
matchingNodes := 0
for _, node := range nodes {
// node is a swarm.Node
for k, v := range node.Spec.Labels {
if k == labelKey {
if labelValue == "" || labelValue == v {
matchingNodes++
}
}
}
}
switch matchingNodes {
case 0:
return "", fmt.Errorf("can't find a node with label %s", labelKey)
case 1:
fallthrough
case 2:
return TARGET_SINGLE, nil
default:
return TARGET_CLUSTER, nil
}
}
// returns sorted list of yaml file pathnames
func getStackFiles(path string, deploymentMode string) ([]string, error) |
func deploy(d *command.DockerCli, stackfile string, namespace string) error {
if namespace == "" {
// use the stackfile basename as the default stack namespace
namespace = filepath.Base(stackfile)
namespace = strings.TrimSuffix(namespace, filepath.Ext(namespace))
}
options := stack.DeployOptions{
Namespace: namespace,
Composefile: stackfile,
ResolveImage: stack.ResolveImageAlways,
SendRegistryAuth: false,
Prune: false,
}
if err := stack.RunDeploy(d, options); err != nil {
return err
}
for _, err := range Docker.WaitOnStack(context.Background(), namespace, os.Stdout) {
if err != nil {
return err
}
}
return nil
}
// AMP configs map: Config name paired to config file in ./defaults
var ampConfigs = map[string]string{
"prometheus_alerts_rules": "prometheus_alerts.rules",
}
// This is the default configs path
const defaultConfigsPath = "defaults"
func createInitialConfigs() error {
// Computing config path
configPath := path.Join("/", defaultConfigsPath)
pe, err := pathExists(configPath)
if err != nil {
return err
}
if !pe {
configPath = defaultConfigsPath
}
configPath, err = filepath.Abs(configPath)
if err != nil {
return err
}
log.Println("Using the following path for configs:", configPath)
// Creating configs
for config, filename := range ampConfigs {
// Check if config already exists
exists, err := Docker.ConfigExists(config)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing config:", config)
continue
}
// Load config data
data, err := ioutil.ReadFile(path.Join(configPath, filename))
if err != nil {
return err
}
// Create config
if _, err := Docker.CreateConfig(config, data); err != nil {
return err
}
log.Println("Successfully created config:", config)
}
return nil
}
// AMP secrets map: Secret name paired to secret file in ./defaults
var ampSecrets = map[string]string{
"alertmanager_yml": "alertmanager.yml",
"amplifier_yml": "amplifier.yml",
"certificate_amp": "certificate.amp",
}
// This is the default secrets path
const defaultSecretsPath = "defaults"
// exists returns whether the given file or directory exists or not
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return true, err
}
func createInitialSecrets() error {
// Computing secret path
secretPath := path.Join("/", defaultSecretsPath)
pe, err := pathExists(secretPath)
if err != nil {
return err
}
if !pe {
secretPath = defaultSecretsPath
}
secretPath, err = filepath.Abs(secretPath)
if err != nil {
return err
}
log.Println("Using the following path for secrets:", secretPath)
// Creating secrets
for secret, filename := range ampSecrets {
// Check if secret already exists
exists, err := Docker.SecretExists(secret)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing secret:", secret)
continue
}
// Load secret data
data, err := ioutil.ReadFile(path.Join(secretPath, filename))
if err != nil {
return err
}
// Create secret
if _, err := Docker.CreateSecret(secret, data); err != nil {
return err
}
log.Println("Successfully created secret:", secret)
}
return nil
}
var ampnetworks = []string{"public", "monit", "core"}
func createInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
exists, err := Docker.NetworkExists(network)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing network:", network)
continue
}
if _, err := Docker.CreateNetwork(network, true, true); err != nil {
return err
}
log.Println("Successfully created network:", network)
}
return nil
}
func removeInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
id, err := Docker.NetworkID(network)
if err != nil {
return err
}
if id == "" {
continue // Skipping non existent network
}
// Remove network
if err := Docker.RemoveNetwork(id); err != nil {
return err
}
log.Printf("Successfully removed network %s [%s]", network, id)
}
return nil
}
func removeExitedContainers(timeout int) error {
i := 0
dontKill := []string{"amp-agent", "amp-local"}
var containers []types.Container
if timeout == 0 {
timeout = 30 // default value
}
log.Println("waiting for all services to clear up...")
filter := filters.NewArgs()
filter.Add("is-task", "true")
filter.Add("label", "io.amp.role=infrastructure")
for i < timeout {
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if len(containers) == 0 {
log.Println("cleared up")
break
}
for _, c := range containers {
switch c.State {
case "exited":
log.Printf("Removing container %s [%s]\n", c.Names[0], c.Status)
err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{})
if err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
case "removing", "running":
// ignore it, _running_ containers will be killed after the loop
// _removing_ containers are in progress of deletion
default:
// this is not expected
log.Printf("Container %s found in status %s, %s\n", c.Names[0], c.Status, c.State)
}
}
i++
time.Sleep(1 * time.Second)
}
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if i == timeout {
log.Println("timing out")
log.Printf("%d containers left\n", len(containers))
}
//
for _, c := range containers {
for _, e := range dontKill {
if strings.Contains(c.Names[0], e) {
continue
}
}
log.Printf("Force removing container %s [%s]", c.Names[0], c.State)
if err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{Force: true}); err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
}
return nil
}
const ampVolumesPrefix = "amp_"
func removeVolumes(timeout int) error {
// volume remove timeout (sec)
if timeout == 0 {
timeout = 5 // default value
}
// List amp volumes
filter := opts.NewFilterOpt()
filter.Set("name=" + ampVolumesPrefix)
volumes, err := Docker.ListVolumes(filter)
if err != nil {
return nil
}
// Remove volumes
for _, volume := range volumes {
log.Printf("Removing volume [%s]... ", volume.Name)
if err := Docker.RemoveVolume(volume.Name, false, timeout); err != nil {
log.Println("Failed")
return err
}
}
return nil
}
| {
if path == "" {
path = "./stacks"
}
path += "/" + deploymentMode
// a bit more work but we can't just use filepath.Glob
// since we need to match both *.yml and *.yaml
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
stackfiles := []string{}
for _, f := range files {
name := f.Name()
if matched, _ := regexp.MatchString("\\.ya?ml$", name); matched {
stackfiles = append(stackfiles, filepath.Join(path, name))
}
}
return stackfiles, nil
} | identifier_body |
install.go | package cmd
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"path"
"path/filepath"
"regexp"
"strings"
"time"
"docker.io/go-docker"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/filters"
"github.com/appcelerator/amp/docker/cli/cli/command"
"github.com/appcelerator/amp/docker/cli/cli/command/stack"
"github.com/appcelerator/amp/docker/cli/opts"
"github.com/appcelerator/amp/docker/docker/pkg/term"
ampdocker "github.com/appcelerator/amp/pkg/docker"
"github.com/spf13/cobra"
)
const (
TARGET_SINGLE = "single"
TARGET_CLUSTER = "cluster"
)
type InstallOptions struct {
NoLogs bool
NoMetrics bool
NoProxy bool
}
var InstallOpts = &InstallOptions{}
var Docker = ampdocker.NewClient(ampdocker.DefaultURL, ampdocker.DefaultVersion)
func NewInstallCommand() *cobra.Command {
installCmd := &cobra.Command{
Use: "install",
Short: "Set up amp services in swarm environment",
RunE: Install,
}
return installCmd
}
func Install(cmd *cobra.Command, args []string) error {
stdin, stdout, stderr := term.StdStreams()
dockerCli := ampdocker.NewDockerCli(stdin, stdout, stderr)
if err := Docker.Connect(); err != nil {
return err
}
// Create initial secrets
createInitialSecrets()
// Create initial configs
createInitialConfigs()
// Create initial networks
createInitialNetworks()
namespace := "amp"
if len(args) > 0 && args[0] != "" {
namespace = args[0]
}
// Handle interrupt signal
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
log.Println("\nReceived an interrupt signal - removing AMP services")
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
os.Exit(1)
}
}()
deploymentMode, err := serviceDeploymentMode(dockerCli.Client(), "amp.type.kv", "true")
if err != nil {
return err
}
stackFiles, err := getStackFiles("./stacks", deploymentMode)
if err != nil {
return err
}
for _, stackFile := range stackFiles {
if strings.Contains(stackFile, "logs") && InstallOpts.NoLogs ||
strings.Contains(stackFile, "metrics") && InstallOpts.NoMetrics ||
strings.Contains(stackFile, "proxy") && InstallOpts.NoProxy {
continue
}
log.Println("Deploying stack", stackFile)
if err := deploy(dockerCli, stackFile, namespace); err != nil {
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
return err
}
}
return nil
}
// returns the deployment mode
// based on the number of nodes with the label passed as argument
// if number of nodes > 2, mode = cluster, else mode = single
func serviceDeploymentMode(c docker.APIClient, labelKey string, labelValue string) (string, error) {
// unfortunately filtering labels on NodeList won't work as expected, Cf. https://github.com/moby/moby/issues/27231
nodes, err := c.NodeList(context.Background(), types.NodeListOptions{})
if err != nil {
return "", err
}
matchingNodes := 0
for _, node := range nodes {
// node is a swarm.Node
for k, v := range node.Spec.Labels {
if k == labelKey {
if labelValue == "" || labelValue == v {
matchingNodes++
}
}
}
}
switch matchingNodes {
case 0:
return "", fmt.Errorf("can't find a node with label %s", labelKey)
case 1:
fallthrough
case 2:
return TARGET_SINGLE, nil
default:
return TARGET_CLUSTER, nil
}
}
// returns sorted list of yaml file pathnames
func getStackFiles(path string, deploymentMode string) ([]string, error) {
if path == "" {
path = "./stacks"
}
path += "/" + deploymentMode
// a bit more work but we can't just use filepath.Glob
// since we need to match both *.yml and *.yaml
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
stackfiles := []string{}
for _, f := range files {
name := f.Name()
if matched, _ := regexp.MatchString("\\.ya?ml$", name); matched {
stackfiles = append(stackfiles, filepath.Join(path, name))
}
}
return stackfiles, nil
}
func deploy(d *command.DockerCli, stackfile string, namespace string) error {
if namespace == "" {
// use the stackfile basename as the default stack namespace
namespace = filepath.Base(stackfile)
namespace = strings.TrimSuffix(namespace, filepath.Ext(namespace))
}
options := stack.DeployOptions{
Namespace: namespace,
Composefile: stackfile,
ResolveImage: stack.ResolveImageAlways,
SendRegistryAuth: false,
Prune: false,
}
if err := stack.RunDeploy(d, options); err != nil {
return err
}
for _, err := range Docker.WaitOnStack(context.Background(), namespace, os.Stdout) {
if err != nil {
return err
}
}
return nil
}
// AMP configs map: Config name paired to config file in ./defaults
var ampConfigs = map[string]string{
"prometheus_alerts_rules": "prometheus_alerts.rules",
}
// This is the default configs path
const defaultConfigsPath = "defaults"
func createInitialConfigs() error {
// Computing config path
configPath := path.Join("/", defaultConfigsPath)
pe, err := pathExists(configPath)
if err != nil {
return err
}
if !pe {
configPath = defaultConfigsPath
}
configPath, err = filepath.Abs(configPath)
if err != nil {
return err
}
log.Println("Using the following path for configs:", configPath)
// Creating configs
for config, filename := range ampConfigs {
// Check if config already exists
exists, err := Docker.ConfigExists(config)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing config:", config)
continue
}
// Load config data
data, err := ioutil.ReadFile(path.Join(configPath, filename))
if err != nil {
return err
}
// Create config
if _, err := Docker.CreateConfig(config, data); err != nil {
return err
}
log.Println("Successfully created config:", config)
}
return nil
}
// AMP secrets map: Secret name paired to secret file in ./defaults
var ampSecrets = map[string]string{
"alertmanager_yml": "alertmanager.yml",
"amplifier_yml": "amplifier.yml",
"certificate_amp": "certificate.amp",
}
// This is the default secrets path
const defaultSecretsPath = "defaults"
// exists returns whether the given file or directory exists or not
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return true, err
}
func createInitialSecrets() error {
// Computing secret path
secretPath := path.Join("/", defaultSecretsPath)
pe, err := pathExists(secretPath)
if err != nil {
return err
}
if !pe {
secretPath = defaultSecretsPath
}
secretPath, err = filepath.Abs(secretPath)
if err != nil {
return err
}
log.Println("Using the following path for secrets:", secretPath)
// Creating secrets
for secret, filename := range ampSecrets {
// Check if secret already exists
exists, err := Docker.SecretExists(secret)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing secret:", secret)
continue
}
// Load secret data
data, err := ioutil.ReadFile(path.Join(secretPath, filename))
if err != nil {
return err
}
// Create secret
if _, err := Docker.CreateSecret(secret, data); err != nil {
return err
}
log.Println("Successfully created secret:", secret)
}
return nil
}
var ampnetworks = []string{"public", "monit", "core"}
func createInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
exists, err := Docker.NetworkExists(network)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing network:", network)
continue
}
if _, err := Docker.CreateNetwork(network, true, true); err != nil {
return err
}
log.Println("Successfully created network:", network)
}
return nil
}
func removeInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
id, err := Docker.NetworkID(network)
if err != nil {
return err
}
if id == "" {
continue // Skipping non existent network
}
// Remove network
if err := Docker.RemoveNetwork(id); err != nil {
return err
}
log.Printf("Successfully removed network %s [%s]", network, id)
}
return nil
}
func removeExitedContainers(timeout int) error {
i := 0
dontKill := []string{"amp-agent", "amp-local"}
var containers []types.Container
if timeout == 0 {
timeout = 30 // default value
}
log.Println("waiting for all services to clear up...")
filter := filters.NewArgs()
filter.Add("is-task", "true")
filter.Add("label", "io.amp.role=infrastructure")
for i < timeout {
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if len(containers) == 0 {
log.Println("cleared up")
break
}
for _, c := range containers {
switch c.State {
case "exited":
log.Printf("Removing container %s [%s]\n", c.Names[0], c.Status)
err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{})
if err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
case "removing", "running":
// ignore it, _running_ containers will be killed after the loop
// _removing_ containers are in progress of deletion
default:
// this is not expected
log.Printf("Container %s found in status %s, %s\n", c.Names[0], c.Status, c.State)
}
}
i++
time.Sleep(1 * time.Second)
}
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if i == timeout {
log.Println("timing out")
log.Printf("%d containers left\n", len(containers))
}
//
for _, c := range containers {
for _, e := range dontKill {
if strings.Contains(c.Names[0], e) {
continue
}
}
log.Printf("Force removing container %s [%s]", c.Names[0], c.State)
if err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{Force: true}); err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
}
return nil
}
const ampVolumesPrefix = "amp_"
func removeVolumes(timeout int) error {
// volume remove timeout (sec)
if timeout == 0 {
timeout = 5 // default value
}
// List amp volumes
filter := opts.NewFilterOpt()
filter.Set("name=" + ampVolumesPrefix)
volumes, err := Docker.ListVolumes(filter)
if err != nil |
// Remove volumes
for _, volume := range volumes {
log.Printf("Removing volume [%s]... ", volume.Name)
if err := Docker.RemoveVolume(volume.Name, false, timeout); err != nil {
log.Println("Failed")
return err
}
}
return nil
}
| {
return nil
} | conditional_block |
install.go | package cmd
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"path"
"path/filepath"
"regexp"
"strings"
"time"
"docker.io/go-docker"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/filters"
"github.com/appcelerator/amp/docker/cli/cli/command"
"github.com/appcelerator/amp/docker/cli/cli/command/stack"
"github.com/appcelerator/amp/docker/cli/opts"
"github.com/appcelerator/amp/docker/docker/pkg/term"
ampdocker "github.com/appcelerator/amp/pkg/docker"
"github.com/spf13/cobra"
)
const (
TARGET_SINGLE = "single"
TARGET_CLUSTER = "cluster"
)
type InstallOptions struct {
NoLogs bool
NoMetrics bool
NoProxy bool
}
var InstallOpts = &InstallOptions{}
var Docker = ampdocker.NewClient(ampdocker.DefaultURL, ampdocker.DefaultVersion)
func NewInstallCommand() *cobra.Command {
installCmd := &cobra.Command{
Use: "install",
Short: "Set up amp services in swarm environment",
RunE: Install,
}
return installCmd
}
func Install(cmd *cobra.Command, args []string) error {
stdin, stdout, stderr := term.StdStreams()
dockerCli := ampdocker.NewDockerCli(stdin, stdout, stderr)
if err := Docker.Connect(); err != nil {
return err
}
// Create initial secrets
createInitialSecrets()
// Create initial configs
createInitialConfigs()
// Create initial networks
createInitialNetworks()
namespace := "amp"
if len(args) > 0 && args[0] != "" {
namespace = args[0]
}
// Handle interrupt signal
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
log.Println("\nReceived an interrupt signal - removing AMP services")
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
os.Exit(1)
}
}()
deploymentMode, err := serviceDeploymentMode(dockerCli.Client(), "amp.type.kv", "true")
if err != nil {
return err
}
stackFiles, err := getStackFiles("./stacks", deploymentMode)
if err != nil {
return err
}
for _, stackFile := range stackFiles {
if strings.Contains(stackFile, "logs") && InstallOpts.NoLogs ||
strings.Contains(stackFile, "metrics") && InstallOpts.NoMetrics ||
strings.Contains(stackFile, "proxy") && InstallOpts.NoProxy {
continue
}
log.Println("Deploying stack", stackFile)
if err := deploy(dockerCli, stackFile, namespace); err != nil {
stack.RunRemove(dockerCli, stack.RemoveOptions{Namespaces: []string{namespace}})
return err
}
}
return nil
}
// returns the deployment mode
// based on the number of nodes with the label passed as argument
// if number of nodes > 2, mode = cluster, else mode = single
func serviceDeploymentMode(c docker.APIClient, labelKey string, labelValue string) (string, error) {
// unfortunately filtering labels on NodeList won't work as expected, Cf. https://github.com/moby/moby/issues/27231
nodes, err := c.NodeList(context.Background(), types.NodeListOptions{})
if err != nil {
return "", err
}
matchingNodes := 0
for _, node := range nodes {
// node is a swarm.Node
for k, v := range node.Spec.Labels {
if k == labelKey {
if labelValue == "" || labelValue == v {
matchingNodes++
}
}
}
}
switch matchingNodes {
case 0:
return "", fmt.Errorf("can't find a node with label %s", labelKey)
case 1:
fallthrough
case 2:
return TARGET_SINGLE, nil
default:
return TARGET_CLUSTER, nil
}
}
// returns sorted list of yaml file pathnames
func | (path string, deploymentMode string) ([]string, error) {
if path == "" {
path = "./stacks"
}
path += "/" + deploymentMode
// a bit more work but we can't just use filepath.Glob
// since we need to match both *.yml and *.yaml
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
stackfiles := []string{}
for _, f := range files {
name := f.Name()
if matched, _ := regexp.MatchString("\\.ya?ml$", name); matched {
stackfiles = append(stackfiles, filepath.Join(path, name))
}
}
return stackfiles, nil
}
func deploy(d *command.DockerCli, stackfile string, namespace string) error {
if namespace == "" {
// use the stackfile basename as the default stack namespace
namespace = filepath.Base(stackfile)
namespace = strings.TrimSuffix(namespace, filepath.Ext(namespace))
}
options := stack.DeployOptions{
Namespace: namespace,
Composefile: stackfile,
ResolveImage: stack.ResolveImageAlways,
SendRegistryAuth: false,
Prune: false,
}
if err := stack.RunDeploy(d, options); err != nil {
return err
}
for _, err := range Docker.WaitOnStack(context.Background(), namespace, os.Stdout) {
if err != nil {
return err
}
}
return nil
}
// AMP configs map: Config name paired to config file in ./defaults
var ampConfigs = map[string]string{
"prometheus_alerts_rules": "prometheus_alerts.rules",
}
// This is the default configs path
const defaultConfigsPath = "defaults"
func createInitialConfigs() error {
// Computing config path
configPath := path.Join("/", defaultConfigsPath)
pe, err := pathExists(configPath)
if err != nil {
return err
}
if !pe {
configPath = defaultConfigsPath
}
configPath, err = filepath.Abs(configPath)
if err != nil {
return err
}
log.Println("Using the following path for configs:", configPath)
// Creating configs
for config, filename := range ampConfigs {
// Check if config already exists
exists, err := Docker.ConfigExists(config)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing config:", config)
continue
}
// Load config data
data, err := ioutil.ReadFile(path.Join(configPath, filename))
if err != nil {
return err
}
// Create config
if _, err := Docker.CreateConfig(config, data); err != nil {
return err
}
log.Println("Successfully created config:", config)
}
return nil
}
// AMP secrets map: Secret name paired to secret file in ./defaults
var ampSecrets = map[string]string{
"alertmanager_yml": "alertmanager.yml",
"amplifier_yml": "amplifier.yml",
"certificate_amp": "certificate.amp",
}
// This is the default secrets path
const defaultSecretsPath = "defaults"
// exists returns whether the given file or directory exists or not
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return true, err
}
func createInitialSecrets() error {
// Computing secret path
secretPath := path.Join("/", defaultSecretsPath)
pe, err := pathExists(secretPath)
if err != nil {
return err
}
if !pe {
secretPath = defaultSecretsPath
}
secretPath, err = filepath.Abs(secretPath)
if err != nil {
return err
}
log.Println("Using the following path for secrets:", secretPath)
// Creating secrets
for secret, filename := range ampSecrets {
// Check if secret already exists
exists, err := Docker.SecretExists(secret)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing secret:", secret)
continue
}
// Load secret data
data, err := ioutil.ReadFile(path.Join(secretPath, filename))
if err != nil {
return err
}
// Create secret
if _, err := Docker.CreateSecret(secret, data); err != nil {
return err
}
log.Println("Successfully created secret:", secret)
}
return nil
}
var ampnetworks = []string{"public", "monit", "core"}
func createInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
exists, err := Docker.NetworkExists(network)
if err != nil {
return err
}
if exists {
log.Println("Skipping already existing network:", network)
continue
}
if _, err := Docker.CreateNetwork(network, true, true); err != nil {
return err
}
log.Println("Successfully created network:", network)
}
return nil
}
func removeInitialNetworks() error {
for _, network := range ampnetworks {
// Check if network already exists
id, err := Docker.NetworkID(network)
if err != nil {
return err
}
if id == "" {
continue // Skipping non existent network
}
// Remove network
if err := Docker.RemoveNetwork(id); err != nil {
return err
}
log.Printf("Successfully removed network %s [%s]", network, id)
}
return nil
}
func removeExitedContainers(timeout int) error {
i := 0
dontKill := []string{"amp-agent", "amp-local"}
var containers []types.Container
if timeout == 0 {
timeout = 30 // default value
}
log.Println("waiting for all services to clear up...")
filter := filters.NewArgs()
filter.Add("is-task", "true")
filter.Add("label", "io.amp.role=infrastructure")
for i < timeout {
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if len(containers) == 0 {
log.Println("cleared up")
break
}
for _, c := range containers {
switch c.State {
case "exited":
log.Printf("Removing container %s [%s]\n", c.Names[0], c.Status)
err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{})
if err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
case "removing", "running":
// ignore it, _running_ containers will be killed after the loop
// _removing_ containers are in progress of deletion
default:
// this is not expected
log.Printf("Container %s found in status %s, %s\n", c.Names[0], c.Status, c.State)
}
}
i++
time.Sleep(1 * time.Second)
}
containers, err := Docker.GetClient().ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if err != nil {
return err
}
if i == timeout {
log.Println("timing out")
log.Printf("%d containers left\n", len(containers))
}
//
for _, c := range containers {
for _, e := range dontKill {
if strings.Contains(c.Names[0], e) {
continue
}
}
log.Printf("Force removing container %s [%s]", c.Names[0], c.State)
if err := Docker.GetClient().ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{Force: true}); err != nil {
if strings.Contains(err.Error(), "already in progress") {
continue // leave it to Docker
}
return err
}
}
return nil
}
const ampVolumesPrefix = "amp_"
func removeVolumes(timeout int) error {
// volume remove timeout (sec)
if timeout == 0 {
timeout = 5 // default value
}
// List amp volumes
filter := opts.NewFilterOpt()
filter.Set("name=" + ampVolumesPrefix)
volumes, err := Docker.ListVolumes(filter)
if err != nil {
return nil
}
// Remove volumes
for _, volume := range volumes {
log.Printf("Removing volume [%s]... ", volume.Name)
if err := Docker.RemoveVolume(volume.Name, false, timeout); err != nil {
log.Println("Failed")
return err
}
}
return nil
}
| getStackFiles | identifier_name |
decoder.rs | use std::cmp;
use std::io::{self, Read};
use encoding_rs::{Decoder, Encoding, UTF_8};
/// A BOM is at least 2 bytes and at most 3 bytes.
///
/// If fewer than 2 bytes are available to be read at the beginning of a
/// reader, then a BOM is `None`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Bom {
bytes: [u8; 3],
len: usize,
}
impl Bom {
fn as_slice(&self) -> &[u8] {
&self.bytes[0..self.len]
}
fn decoder(&self) -> Option<Decoder> {
let bom = self.as_slice();
if bom.len() < 3 {
return None;
}
if let Some((enc, _)) = Encoding::for_bom(bom) {
if enc != UTF_8 {
return Some(enc.new_decoder_with_bom_removal());
}
}
None
}
}
/// `BomPeeker` wraps `R` and satisfies the `io::Read` interface while also
/// providing a peek at the BOM if one exists. Peeking at the BOM does not
/// advance the reader.
struct BomPeeker<R> {
rdr: R,
bom: Option<Bom>,
nread: usize,
}
impl<R: io::Read> BomPeeker<R> {
/// Create a new BomPeeker.
///
/// The first three bytes can be read using the `peek_bom` method, but
/// will not advance the reader.
fn new(rdr: R) -> BomPeeker<R> {
BomPeeker { rdr: rdr, bom: None, nread: 0 }
}
/// Peek at the first three bytes of the underlying reader.
///
/// This does not advance the reader provided by `BomPeeker`.
///
/// If the underlying reader does not have at least two bytes available,
/// then `None` is returned.
fn peek_bom(&mut self) -> io::Result<Bom> {
if let Some(bom) = self.bom {
return Ok(bom);
}
self.bom = Some(Bom { bytes: [0; 3], len: 0 });
let mut buf = [0u8; 3];
let bom_len = read_full(&mut self.rdr, &mut buf)?;
self.bom = Some(Bom { bytes: buf, len: bom_len });
Ok(self.bom.unwrap())
}
}
impl<R: io::Read> io::Read for BomPeeker<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.nread < 3 {
let bom = self.peek_bom()?;
let bom = bom.as_slice();
if self.nread < bom.len() {
let rest = &bom[self.nread..];
let len = cmp::min(buf.len(), rest.len());
buf[..len].copy_from_slice(&rest[..len]);
self.nread += len;
return Ok(len);
}
}
let nread = self.rdr.read(buf)?;
self.nread += nread;
Ok(nread)
}
}
/// Like `io::Read::read_exact`, except it never returns `UnexpectedEof` and
/// instead returns the number of bytes read if EOF is seen before filling
/// `buf`.
fn read_full<R: io::Read>(
mut rdr: R,
mut buf: &mut [u8],
) -> io::Result<usize> {
let mut nread = 0;
while !buf.is_empty() {
match rdr.read(buf) {
Ok(0) => break, | Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
Ok(nread)
}
/// A reader that transcodes to UTF-8. The source encoding is determined by
/// inspecting the BOM from the stream read from `R`, if one exists. If a
/// UTF-16 BOM exists, then the source stream is transcoded to UTF-8 with
/// invalid UTF-16 sequences translated to the Unicode replacement character.
/// In all other cases, the underlying reader is passed through unchanged.
///
/// `R` is the type of the underlying reader and `B` is the type of an internal
/// buffer used to store the results of transcoding.
///
/// Note that not all methods on `io::Read` work with this implementation.
/// For example, the `bytes` adapter method attempts to read a single byte at
/// a time, but this implementation requires a buffer of size at least `4`. If
/// a buffer of size less than 4 is given, then an error is returned.
pub struct DecodeReader<R, B> {
/// The underlying reader, wrapped in a peeker for reading a BOM if one
/// exists.
rdr: BomPeeker<R>,
/// The internal buffer to store transcoded bytes before they are read by
/// callers.
buf: B,
/// The current position in `buf`. Subsequent reads start here.
pos: usize,
/// The number of transcoded bytes in `buf`. Subsequent reads end here.
buflen: usize,
/// Whether this is the first read or not (in which we inspect the BOM).
first: bool,
/// Whether a "last" read has occurred. After this point, EOF will always
/// be returned.
last: bool,
/// The underlying text decoder derived from the BOM, if one exists.
decoder: Option<Decoder>,
}
impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
/// Create a new transcoder that converts a source stream to valid UTF-8.
///
/// If an encoding is specified, then it is used to transcode `rdr` to
/// UTF-8. Otherwise, if no encoding is specified, and if a UTF-16 BOM is
/// found, then the corresponding UTF-16 encoding is used to transcode
/// `rdr` to UTF-8. In all other cases, `rdr` is assumed to be at least
/// ASCII-compatible and passed through untouched.
///
/// Errors in the encoding of `rdr` are handled with the Unicode
/// replacement character. If no encoding of `rdr` is specified, then
/// errors are not handled.
pub fn new(
rdr: R,
buf: B,
enc: Option<&'static Encoding>,
) -> DecodeReader<R, B> {
DecodeReader {
rdr: BomPeeker::new(rdr),
buf: buf,
buflen: 0,
pos: 0,
first: enc.is_none(),
last: false,
decoder: enc.map(|enc| enc.new_decoder_with_bom_removal()),
}
}
/// Fill the internal buffer from the underlying reader.
///
/// If there are unread bytes in the internal buffer, then we move them
/// to the beginning of the internal buffer and fill the remainder.
///
/// If the internal buffer is too small to read additional bytes, then an
/// error is returned.
#[inline(always)] // massive perf benefit (???)
fn fill(&mut self) -> io::Result<()> {
if self.pos < self.buflen {
if self.buflen >= self.buf.as_mut().len() {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: internal buffer exhausted"));
}
let newlen = self.buflen - self.pos;
let mut tmp = Vec::with_capacity(newlen);
tmp.extend_from_slice(&self.buf.as_mut()[self.pos..self.buflen]);
self.buf.as_mut()[..newlen].copy_from_slice(&tmp);
self.buflen = newlen;
} else {
self.buflen = 0;
}
self.pos = 0;
self.buflen +=
self.rdr.read(&mut self.buf.as_mut()[self.buflen..])?;
Ok(())
}
/// Transcode the inner stream to UTF-8 in `buf`. This assumes that there
/// is a decoder capable of transcoding the inner stream to UTF-8. This
/// returns the number of bytes written to `buf`.
///
/// When this function returns, exactly one of the following things will
/// be true:
///
/// 1. A non-zero number of bytes were written to `buf`.
/// 2. The underlying reader reached EOF.
/// 3. An error is returned: the internal buffer ran out of room.
/// 4. An I/O error occurred.
///
/// Note that `buf` must have at least 4 bytes of space.
fn transcode(&mut self, buf: &mut [u8]) -> io::Result<usize> {
assert!(buf.len() >= 4);
if self.last {
return Ok(0);
}
if self.pos >= self.buflen {
self.fill()?;
}
let mut nwrite = 0;
loop {
let (_, nin, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&self.buf.as_mut()[self.pos..self.buflen], buf, false);
self.pos += nin;
nwrite += nout;
// If we've written at least one byte to the caller-provided
// buffer, then our mission is complete.
if nwrite > 0 {
break;
}
// Otherwise, we know that our internal buffer has insufficient
// data to transcode at least one char, so we attempt to refill it.
self.fill()?;
// Quit on EOF.
if self.buflen == 0 {
self.pos = 0;
self.last = true;
let (_, _, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&[], buf, true);
return Ok(nout);
}
}
Ok(nwrite)
}
#[inline(never)] // impacts perf...
fn detect(&mut self) -> io::Result<()> {
let bom = self.rdr.peek_bom()?;
self.decoder = bom.decoder();
Ok(())
}
}
impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.first {
self.first = false;
self.detect()?;
}
if self.decoder.is_none() {
return self.rdr.read(buf);
}
// When decoding UTF-8, we need at least 4 bytes of space to guarantee
// that we can decode at least one codepoint. If we don't have it, we
// can either return `0` for the number of bytes read or return an
// error. Since `0` would be interpreted as a possibly premature EOF,
// we opt for an error.
if buf.len() < 4 {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: byte buffer must have length at least 4"));
}
self.transcode(buf)
}
}
#[cfg(test)]
mod tests {
use std::io::Read;
use encoding_rs::Encoding;
use super::{Bom, BomPeeker, DecodeReader};
fn read_to_string<R: Read>(mut rdr: R) -> String {
let mut s = String::new();
rdr.read_to_string(&mut s).unwrap();
s
}
#[test]
fn peeker_empty() {
let buf = [];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(Bom { bytes: [0; 3], len: 0}, peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one() {
let buf = [1];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 0, 0], len: 1},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_two() {
let buf = [1, 2];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 0], len: 2},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(2, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_three() {
let buf = [1, 2, 3];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_four() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one_at_a_time() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
let mut tmp = [0; 1];
assert_eq!(0, peeker.read(&mut tmp[..0]).unwrap());
assert_eq!(0, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(2, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(3, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
}
// In cases where all we have is a bom, we expect the bytes to be
// passed through unchanged.
#[test]
fn trans_utf16_bom() {
let srcbuf = vec![0xFF, 0xFE];
let mut dstbuf = vec![0; 8 * (1<<10)];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xFE, 0xFF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xEF, 0xBB, 0xBF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
}
// Test basic UTF-16 decoding.
#[test]
fn trans_utf16_basic() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
let srcbuf = vec![0xFE, 0xFF, 0x00, 0x61];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
}
// Test incomplete UTF-16 decoding. This ensures we see a replacement char
// if the stream ends with an unpaired code unit.
#[test]
fn trans_utf16_incomplete() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a\u{FFFD}", read_to_string(&mut rdr));
}
macro_rules! test_trans_simple {
($name:ident, $enc:expr, $srcbytes:expr, $dst:expr) => {
#[test]
fn $name() {
let srcbuf = &$srcbytes[..];
let enc = Encoding::for_label($enc.as_bytes());
let mut rdr = DecodeReader::new(
&*srcbuf, vec![0; 8 * (1<<10)], enc);
assert_eq!($dst, read_to_string(&mut rdr));
}
}
}
// This isn't exhaustive obviously, but it lets us test base level support.
test_trans_simple!(trans_simple_auto, "does not exist", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf8, "utf-8", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf16le, "utf-16le", b"\x16\x04", "Ж");
test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж");
test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж");
test_trans_simple!(
trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж");
test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж");
test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_latin1, "latin1", b"\xA9", "©");
} | Ok(n) => {
nread += n;
let tmp = buf;
buf = &mut tmp[n..];
} | random_line_split |
decoder.rs | use std::cmp;
use std::io::{self, Read};
use encoding_rs::{Decoder, Encoding, UTF_8};
/// A BOM is at least 2 bytes and at most 3 bytes.
///
/// If fewer than 2 bytes are available to be read at the beginning of a
/// reader, then a BOM is `None`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Bom {
bytes: [u8; 3],
len: usize,
}
impl Bom {
fn as_slice(&self) -> &[u8] {
&self.bytes[0..self.len]
}
fn decoder(&self) -> Option<Decoder> {
let bom = self.as_slice();
if bom.len() < 3 {
return None;
}
if let Some((enc, _)) = Encoding::for_bom(bom) {
if enc != UTF_8 {
return Some(enc.new_decoder_with_bom_removal());
}
}
None
}
}
/// `BomPeeker` wraps `R` and satisfies the `io::Read` interface while also
/// providing a peek at the BOM if one exists. Peeking at the BOM does not
/// advance the reader.
struct BomPeeker<R> {
rdr: R,
bom: Option<Bom>,
nread: usize,
}
impl<R: io::Read> BomPeeker<R> {
/// Create a new BomPeeker.
///
/// The first three bytes can be read using the `peek_bom` method, but
/// will not advance the reader.
fn new(rdr: R) -> BomPeeker<R> {
BomPeeker { rdr: rdr, bom: None, nread: 0 }
}
/// Peek at the first three bytes of the underlying reader.
///
/// This does not advance the reader provided by `BomPeeker`.
///
/// If the underlying reader does not have at least two bytes available,
/// then `None` is returned.
fn peek_bom(&mut self) -> io::Result<Bom> {
if let Some(bom) = self.bom {
return Ok(bom);
}
self.bom = Some(Bom { bytes: [0; 3], len: 0 });
let mut buf = [0u8; 3];
let bom_len = read_full(&mut self.rdr, &mut buf)?;
self.bom = Some(Bom { bytes: buf, len: bom_len });
Ok(self.bom.unwrap())
}
}
impl<R: io::Read> io::Read for BomPeeker<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.nread < 3 {
let bom = self.peek_bom()?;
let bom = bom.as_slice();
if self.nread < bom.len() {
let rest = &bom[self.nread..];
let len = cmp::min(buf.len(), rest.len());
buf[..len].copy_from_slice(&rest[..len]);
self.nread += len;
return Ok(len);
}
}
let nread = self.rdr.read(buf)?;
self.nread += nread;
Ok(nread)
}
}
/// Like `io::Read::read_exact`, except it never returns `UnexpectedEof` and
/// instead returns the number of bytes read if EOF is seen before filling
/// `buf`.
fn read_full<R: io::Read>(
mut rdr: R,
mut buf: &mut [u8],
) -> io::Result<usize> {
let mut nread = 0;
while !buf.is_empty() {
match rdr.read(buf) {
Ok(0) => break,
Ok(n) => {
nread += n;
let tmp = buf;
buf = &mut tmp[n..];
}
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
Ok(nread)
}
/// A reader that transcodes to UTF-8. The source encoding is determined by
/// inspecting the BOM from the stream read from `R`, if one exists. If a
/// UTF-16 BOM exists, then the source stream is transcoded to UTF-8 with
/// invalid UTF-16 sequences translated to the Unicode replacement character.
/// In all other cases, the underlying reader is passed through unchanged.
///
/// `R` is the type of the underlying reader and `B` is the type of an internal
/// buffer used to store the results of transcoding.
///
/// Note that not all methods on `io::Read` work with this implementation.
/// For example, the `bytes` adapter method attempts to read a single byte at
/// a time, but this implementation requires a buffer of size at least `4`. If
/// a buffer of size less than 4 is given, then an error is returned.
pub struct DecodeReader<R, B> {
/// The underlying reader, wrapped in a peeker for reading a BOM if one
/// exists.
rdr: BomPeeker<R>,
/// The internal buffer to store transcoded bytes before they are read by
/// callers.
buf: B,
/// The current position in `buf`. Subsequent reads start here.
pos: usize,
/// The number of transcoded bytes in `buf`. Subsequent reads end here.
buflen: usize,
/// Whether this is the first read or not (in which we inspect the BOM).
first: bool,
/// Whether a "last" read has occurred. After this point, EOF will always
/// be returned.
last: bool,
/// The underlying text decoder derived from the BOM, if one exists.
decoder: Option<Decoder>,
}
impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
/// Create a new transcoder that converts a source stream to valid UTF-8.
///
/// If an encoding is specified, then it is used to transcode `rdr` to
/// UTF-8. Otherwise, if no encoding is specified, and if a UTF-16 BOM is
/// found, then the corresponding UTF-16 encoding is used to transcode
/// `rdr` to UTF-8. In all other cases, `rdr` is assumed to be at least
/// ASCII-compatible and passed through untouched.
///
/// Errors in the encoding of `rdr` are handled with the Unicode
/// replacement character. If no encoding of `rdr` is specified, then
/// errors are not handled.
pub fn new(
rdr: R,
buf: B,
enc: Option<&'static Encoding>,
) -> DecodeReader<R, B> {
DecodeReader {
rdr: BomPeeker::new(rdr),
buf: buf,
buflen: 0,
pos: 0,
first: enc.is_none(),
last: false,
decoder: enc.map(|enc| enc.new_decoder_with_bom_removal()),
}
}
/// Fill the internal buffer from the underlying reader.
///
/// If there are unread bytes in the internal buffer, then we move them
/// to the beginning of the internal buffer and fill the remainder.
///
/// If the internal buffer is too small to read additional bytes, then an
/// error is returned.
#[inline(always)] // massive perf benefit (???)
fn fill(&mut self) -> io::Result<()> {
if self.pos < self.buflen {
if self.buflen >= self.buf.as_mut().len() {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: internal buffer exhausted"));
}
let newlen = self.buflen - self.pos;
let mut tmp = Vec::with_capacity(newlen);
tmp.extend_from_slice(&self.buf.as_mut()[self.pos..self.buflen]);
self.buf.as_mut()[..newlen].copy_from_slice(&tmp);
self.buflen = newlen;
} else {
self.buflen = 0;
}
self.pos = 0;
self.buflen +=
self.rdr.read(&mut self.buf.as_mut()[self.buflen..])?;
Ok(())
}
/// Transcode the inner stream to UTF-8 in `buf`. This assumes that there
/// is a decoder capable of transcoding the inner stream to UTF-8. This
/// returns the number of bytes written to `buf`.
///
/// When this function returns, exactly one of the following things will
/// be true:
///
/// 1. A non-zero number of bytes were written to `buf`.
/// 2. The underlying reader reached EOF.
/// 3. An error is returned: the internal buffer ran out of room.
/// 4. An I/O error occurred.
///
/// Note that `buf` must have at least 4 bytes of space.
fn transcode(&mut self, buf: &mut [u8]) -> io::Result<usize> {
assert!(buf.len() >= 4);
if self.last {
return Ok(0);
}
if self.pos >= self.buflen {
self.fill()?;
}
let mut nwrite = 0;
loop {
let (_, nin, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&self.buf.as_mut()[self.pos..self.buflen], buf, false);
self.pos += nin;
nwrite += nout;
// If we've written at least one byte to the caller-provided
// buffer, then our mission is complete.
if nwrite > 0 {
break;
}
// Otherwise, we know that our internal buffer has insufficient
// data to transcode at least one char, so we attempt to refill it.
self.fill()?;
// Quit on EOF.
if self.buflen == 0 |
}
Ok(nwrite)
}
#[inline(never)] // impacts perf...
fn detect(&mut self) -> io::Result<()> {
let bom = self.rdr.peek_bom()?;
self.decoder = bom.decoder();
Ok(())
}
}
impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.first {
self.first = false;
self.detect()?;
}
if self.decoder.is_none() {
return self.rdr.read(buf);
}
// When decoding UTF-8, we need at least 4 bytes of space to guarantee
// that we can decode at least one codepoint. If we don't have it, we
// can either return `0` for the number of bytes read or return an
// error. Since `0` would be interpreted as a possibly premature EOF,
// we opt for an error.
if buf.len() < 4 {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: byte buffer must have length at least 4"));
}
self.transcode(buf)
}
}
#[cfg(test)]
mod tests {
use std::io::Read;
use encoding_rs::Encoding;
use super::{Bom, BomPeeker, DecodeReader};
fn read_to_string<R: Read>(mut rdr: R) -> String {
let mut s = String::new();
rdr.read_to_string(&mut s).unwrap();
s
}
#[test]
fn peeker_empty() {
let buf = [];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(Bom { bytes: [0; 3], len: 0}, peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one() {
let buf = [1];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 0, 0], len: 1},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_two() {
let buf = [1, 2];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 0], len: 2},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(2, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_three() {
let buf = [1, 2, 3];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_four() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one_at_a_time() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
let mut tmp = [0; 1];
assert_eq!(0, peeker.read(&mut tmp[..0]).unwrap());
assert_eq!(0, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(2, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(3, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
}
// In cases where all we have is a bom, we expect the bytes to be
// passed through unchanged.
#[test]
fn trans_utf16_bom() {
let srcbuf = vec![0xFF, 0xFE];
let mut dstbuf = vec![0; 8 * (1<<10)];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xFE, 0xFF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xEF, 0xBB, 0xBF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
}
// Test basic UTF-16 decoding.
#[test]
fn trans_utf16_basic() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
let srcbuf = vec![0xFE, 0xFF, 0x00, 0x61];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
}
// Test incomplete UTF-16 decoding. This ensures we see a replacement char
// if the stream ends with an unpaired code unit.
#[test]
fn trans_utf16_incomplete() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a\u{FFFD}", read_to_string(&mut rdr));
}
macro_rules! test_trans_simple {
($name:ident, $enc:expr, $srcbytes:expr, $dst:expr) => {
#[test]
fn $name() {
let srcbuf = &$srcbytes[..];
let enc = Encoding::for_label($enc.as_bytes());
let mut rdr = DecodeReader::new(
&*srcbuf, vec![0; 8 * (1<<10)], enc);
assert_eq!($dst, read_to_string(&mut rdr));
}
}
}
// This isn't exhaustive obviously, but it lets us test base level support.
test_trans_simple!(trans_simple_auto, "does not exist", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf8, "utf-8", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf16le, "utf-16le", b"\x16\x04", "Ж");
test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж");
test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж");
test_trans_simple!(
trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж");
test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж");
test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_latin1, "latin1", b"\xA9", "©");
}
| {
self.pos = 0;
self.last = true;
let (_, _, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&[], buf, true);
return Ok(nout);
} | conditional_block |
decoder.rs | use std::cmp;
use std::io::{self, Read};
use encoding_rs::{Decoder, Encoding, UTF_8};
/// A BOM is at least 2 bytes and at most 3 bytes.
///
/// If fewer than 2 bytes are available to be read at the beginning of a
/// reader, then a BOM is `None`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Bom {
bytes: [u8; 3],
len: usize,
}
impl Bom {
fn as_slice(&self) -> &[u8] {
&self.bytes[0..self.len]
}
fn decoder(&self) -> Option<Decoder> {
let bom = self.as_slice();
if bom.len() < 3 {
return None;
}
if let Some((enc, _)) = Encoding::for_bom(bom) {
if enc != UTF_8 {
return Some(enc.new_decoder_with_bom_removal());
}
}
None
}
}
/// `BomPeeker` wraps `R` and satisfies the `io::Read` interface while also
/// providing a peek at the BOM if one exists. Peeking at the BOM does not
/// advance the reader.
struct BomPeeker<R> {
rdr: R,
bom: Option<Bom>,
nread: usize,
}
impl<R: io::Read> BomPeeker<R> {
/// Create a new BomPeeker.
///
/// The first three bytes can be read using the `peek_bom` method, but
/// will not advance the reader.
fn new(rdr: R) -> BomPeeker<R> {
BomPeeker { rdr: rdr, bom: None, nread: 0 }
}
/// Peek at the first three bytes of the underlying reader.
///
/// This does not advance the reader provided by `BomPeeker`.
///
/// If the underlying reader does not have at least two bytes available,
/// then `None` is returned.
fn peek_bom(&mut self) -> io::Result<Bom> {
if let Some(bom) = self.bom {
return Ok(bom);
}
self.bom = Some(Bom { bytes: [0; 3], len: 0 });
let mut buf = [0u8; 3];
let bom_len = read_full(&mut self.rdr, &mut buf)?;
self.bom = Some(Bom { bytes: buf, len: bom_len });
Ok(self.bom.unwrap())
}
}
impl<R: io::Read> io::Read for BomPeeker<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.nread < 3 {
let bom = self.peek_bom()?;
let bom = bom.as_slice();
if self.nread < bom.len() {
let rest = &bom[self.nread..];
let len = cmp::min(buf.len(), rest.len());
buf[..len].copy_from_slice(&rest[..len]);
self.nread += len;
return Ok(len);
}
}
let nread = self.rdr.read(buf)?;
self.nread += nread;
Ok(nread)
}
}
/// Like `io::Read::read_exact`, except it never returns `UnexpectedEof` and
/// instead returns the number of bytes read if EOF is seen before filling
/// `buf`.
fn read_full<R: io::Read>(
mut rdr: R,
mut buf: &mut [u8],
) -> io::Result<usize> {
let mut nread = 0;
while !buf.is_empty() {
match rdr.read(buf) {
Ok(0) => break,
Ok(n) => {
nread += n;
let tmp = buf;
buf = &mut tmp[n..];
}
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
Ok(nread)
}
/// A reader that transcodes to UTF-8. The source encoding is determined by
/// inspecting the BOM from the stream read from `R`, if one exists. If a
/// UTF-16 BOM exists, then the source stream is transcoded to UTF-8 with
/// invalid UTF-16 sequences translated to the Unicode replacement character.
/// In all other cases, the underlying reader is passed through unchanged.
///
/// `R` is the type of the underlying reader and `B` is the type of an internal
/// buffer used to store the results of transcoding.
///
/// Note that not all methods on `io::Read` work with this implementation.
/// For example, the `bytes` adapter method attempts to read a single byte at
/// a time, but this implementation requires a buffer of size at least `4`. If
/// a buffer of size less than 4 is given, then an error is returned.
pub struct DecodeReader<R, B> {
/// The underlying reader, wrapped in a peeker for reading a BOM if one
/// exists.
rdr: BomPeeker<R>,
/// The internal buffer to store transcoded bytes before they are read by
/// callers.
buf: B,
/// The current position in `buf`. Subsequent reads start here.
pos: usize,
/// The number of transcoded bytes in `buf`. Subsequent reads end here.
buflen: usize,
/// Whether this is the first read or not (in which we inspect the BOM).
first: bool,
/// Whether a "last" read has occurred. After this point, EOF will always
/// be returned.
last: bool,
/// The underlying text decoder derived from the BOM, if one exists.
decoder: Option<Decoder>,
}
impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
/// Create a new transcoder that converts a source stream to valid UTF-8.
///
/// If an encoding is specified, then it is used to transcode `rdr` to
/// UTF-8. Otherwise, if no encoding is specified, and if a UTF-16 BOM is
/// found, then the corresponding UTF-16 encoding is used to transcode
/// `rdr` to UTF-8. In all other cases, `rdr` is assumed to be at least
/// ASCII-compatible and passed through untouched.
///
/// Errors in the encoding of `rdr` are handled with the Unicode
/// replacement character. If no encoding of `rdr` is specified, then
/// errors are not handled.
pub fn new(
rdr: R,
buf: B,
enc: Option<&'static Encoding>,
) -> DecodeReader<R, B> {
DecodeReader {
rdr: BomPeeker::new(rdr),
buf: buf,
buflen: 0,
pos: 0,
first: enc.is_none(),
last: false,
decoder: enc.map(|enc| enc.new_decoder_with_bom_removal()),
}
}
/// Fill the internal buffer from the underlying reader.
///
/// If there are unread bytes in the internal buffer, then we move them
/// to the beginning of the internal buffer and fill the remainder.
///
/// If the internal buffer is too small to read additional bytes, then an
/// error is returned.
#[inline(always)] // massive perf benefit (???)
fn fill(&mut self) -> io::Result<()> {
if self.pos < self.buflen {
if self.buflen >= self.buf.as_mut().len() {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: internal buffer exhausted"));
}
let newlen = self.buflen - self.pos;
let mut tmp = Vec::with_capacity(newlen);
tmp.extend_from_slice(&self.buf.as_mut()[self.pos..self.buflen]);
self.buf.as_mut()[..newlen].copy_from_slice(&tmp);
self.buflen = newlen;
} else {
self.buflen = 0;
}
self.pos = 0;
self.buflen +=
self.rdr.read(&mut self.buf.as_mut()[self.buflen..])?;
Ok(())
}
/// Transcode the inner stream to UTF-8 in `buf`. This assumes that there
/// is a decoder capable of transcoding the inner stream to UTF-8. This
/// returns the number of bytes written to `buf`.
///
/// When this function returns, exactly one of the following things will
/// be true:
///
/// 1. A non-zero number of bytes were written to `buf`.
/// 2. The underlying reader reached EOF.
/// 3. An error is returned: the internal buffer ran out of room.
/// 4. An I/O error occurred.
///
/// Note that `buf` must have at least 4 bytes of space.
fn transcode(&mut self, buf: &mut [u8]) -> io::Result<usize> {
assert!(buf.len() >= 4);
if self.last {
return Ok(0);
}
if self.pos >= self.buflen {
self.fill()?;
}
let mut nwrite = 0;
loop {
let (_, nin, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&self.buf.as_mut()[self.pos..self.buflen], buf, false);
self.pos += nin;
nwrite += nout;
// If we've written at least one byte to the caller-provided
// buffer, then our mission is complete.
if nwrite > 0 {
break;
}
// Otherwise, we know that our internal buffer has insufficient
// data to transcode at least one char, so we attempt to refill it.
self.fill()?;
// Quit on EOF.
if self.buflen == 0 {
self.pos = 0;
self.last = true;
let (_, _, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&[], buf, true);
return Ok(nout);
}
}
Ok(nwrite)
}
#[inline(never)] // impacts perf...
fn detect(&mut self) -> io::Result<()> {
let bom = self.rdr.peek_bom()?;
self.decoder = bom.decoder();
Ok(())
}
}
impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.first {
self.first = false;
self.detect()?;
}
if self.decoder.is_none() {
return self.rdr.read(buf);
}
// When decoding UTF-8, we need at least 4 bytes of space to guarantee
// that we can decode at least one codepoint. If we don't have it, we
// can either return `0` for the number of bytes read or return an
// error. Since `0` would be interpreted as a possibly premature EOF,
// we opt for an error.
if buf.len() < 4 {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: byte buffer must have length at least 4"));
}
self.transcode(buf)
}
}
#[cfg(test)]
mod tests {
use std::io::Read;
use encoding_rs::Encoding;
use super::{Bom, BomPeeker, DecodeReader};
fn read_to_string<R: Read>(mut rdr: R) -> String {
let mut s = String::new();
rdr.read_to_string(&mut s).unwrap();
s
}
#[test]
fn peeker_empty() {
let buf = [];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(Bom { bytes: [0; 3], len: 0}, peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one() {
let buf = [1];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 0, 0], len: 1},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_two() {
let buf = [1, 2];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 0], len: 2},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(2, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_three() {
let buf = [1, 2, 3];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_four() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one_at_a_time() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
let mut tmp = [0; 1];
assert_eq!(0, peeker.read(&mut tmp[..0]).unwrap());
assert_eq!(0, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(2, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(3, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
}
// In cases where all we have is a bom, we expect the bytes to be
// passed through unchanged.
#[test]
fn | () {
let srcbuf = vec![0xFF, 0xFE];
let mut dstbuf = vec![0; 8 * (1<<10)];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xFE, 0xFF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xEF, 0xBB, 0xBF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
}
// Test basic UTF-16 decoding.
#[test]
fn trans_utf16_basic() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
let srcbuf = vec![0xFE, 0xFF, 0x00, 0x61];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
}
// Test incomplete UTF-16 decoding. This ensures we see a replacement char
// if the stream ends with an unpaired code unit.
#[test]
fn trans_utf16_incomplete() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a\u{FFFD}", read_to_string(&mut rdr));
}
macro_rules! test_trans_simple {
($name:ident, $enc:expr, $srcbytes:expr, $dst:expr) => {
#[test]
fn $name() {
let srcbuf = &$srcbytes[..];
let enc = Encoding::for_label($enc.as_bytes());
let mut rdr = DecodeReader::new(
&*srcbuf, vec![0; 8 * (1<<10)], enc);
assert_eq!($dst, read_to_string(&mut rdr));
}
}
}
// This isn't exhaustive obviously, but it lets us test base level support.
test_trans_simple!(trans_simple_auto, "does not exist", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf8, "utf-8", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf16le, "utf-16le", b"\x16\x04", "Ж");
test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж");
test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж");
test_trans_simple!(
trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж");
test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж");
test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_latin1, "latin1", b"\xA9", "©");
}
| trans_utf16_bom | identifier_name |
decoder.rs | use std::cmp;
use std::io::{self, Read};
use encoding_rs::{Decoder, Encoding, UTF_8};
/// A BOM is at least 2 bytes and at most 3 bytes.
///
/// If fewer than 2 bytes are available to be read at the beginning of a
/// reader, then a BOM is `None`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Bom {
bytes: [u8; 3],
len: usize,
}
impl Bom {
fn as_slice(&self) -> &[u8] {
&self.bytes[0..self.len]
}
fn decoder(&self) -> Option<Decoder> |
}
/// `BomPeeker` wraps `R` and satisfies the `io::Read` interface while also
/// providing a peek at the BOM if one exists. Peeking at the BOM does not
/// advance the reader.
struct BomPeeker<R> {
rdr: R,
bom: Option<Bom>,
nread: usize,
}
impl<R: io::Read> BomPeeker<R> {
/// Create a new BomPeeker.
///
/// The first three bytes can be read using the `peek_bom` method, but
/// will not advance the reader.
fn new(rdr: R) -> BomPeeker<R> {
BomPeeker { rdr: rdr, bom: None, nread: 0 }
}
/// Peek at the first three bytes of the underlying reader.
///
/// This does not advance the reader provided by `BomPeeker`.
///
/// If the underlying reader does not have at least two bytes available,
/// then `None` is returned.
fn peek_bom(&mut self) -> io::Result<Bom> {
if let Some(bom) = self.bom {
return Ok(bom);
}
self.bom = Some(Bom { bytes: [0; 3], len: 0 });
let mut buf = [0u8; 3];
let bom_len = read_full(&mut self.rdr, &mut buf)?;
self.bom = Some(Bom { bytes: buf, len: bom_len });
Ok(self.bom.unwrap())
}
}
impl<R: io::Read> io::Read for BomPeeker<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.nread < 3 {
let bom = self.peek_bom()?;
let bom = bom.as_slice();
if self.nread < bom.len() {
let rest = &bom[self.nread..];
let len = cmp::min(buf.len(), rest.len());
buf[..len].copy_from_slice(&rest[..len]);
self.nread += len;
return Ok(len);
}
}
let nread = self.rdr.read(buf)?;
self.nread += nread;
Ok(nread)
}
}
/// Like `io::Read::read_exact`, except it never returns `UnexpectedEof` and
/// instead returns the number of bytes read if EOF is seen before filling
/// `buf`.
fn read_full<R: io::Read>(
mut rdr: R,
mut buf: &mut [u8],
) -> io::Result<usize> {
let mut nread = 0;
while !buf.is_empty() {
match rdr.read(buf) {
Ok(0) => break,
Ok(n) => {
nread += n;
let tmp = buf;
buf = &mut tmp[n..];
}
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
Ok(nread)
}
/// A reader that transcodes to UTF-8. The source encoding is determined by
/// inspecting the BOM from the stream read from `R`, if one exists. If a
/// UTF-16 BOM exists, then the source stream is transcoded to UTF-8 with
/// invalid UTF-16 sequences translated to the Unicode replacement character.
/// In all other cases, the underlying reader is passed through unchanged.
///
/// `R` is the type of the underlying reader and `B` is the type of an internal
/// buffer used to store the results of transcoding.
///
/// Note that not all methods on `io::Read` work with this implementation.
/// For example, the `bytes` adapter method attempts to read a single byte at
/// a time, but this implementation requires a buffer of size at least `4`. If
/// a buffer of size less than 4 is given, then an error is returned.
pub struct DecodeReader<R, B> {
/// The underlying reader, wrapped in a peeker for reading a BOM if one
/// exists.
rdr: BomPeeker<R>,
/// The internal buffer to store transcoded bytes before they are read by
/// callers.
buf: B,
/// The current position in `buf`. Subsequent reads start here.
pos: usize,
/// The number of transcoded bytes in `buf`. Subsequent reads end here.
buflen: usize,
/// Whether this is the first read or not (in which we inspect the BOM).
first: bool,
/// Whether a "last" read has occurred. After this point, EOF will always
/// be returned.
last: bool,
/// The underlying text decoder derived from the BOM, if one exists.
decoder: Option<Decoder>,
}
impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
/// Create a new transcoder that converts a source stream to valid UTF-8.
///
/// If an encoding is specified, then it is used to transcode `rdr` to
/// UTF-8. Otherwise, if no encoding is specified, and if a UTF-16 BOM is
/// found, then the corresponding UTF-16 encoding is used to transcode
/// `rdr` to UTF-8. In all other cases, `rdr` is assumed to be at least
/// ASCII-compatible and passed through untouched.
///
/// Errors in the encoding of `rdr` are handled with the Unicode
/// replacement character. If no encoding of `rdr` is specified, then
/// errors are not handled.
pub fn new(
rdr: R,
buf: B,
enc: Option<&'static Encoding>,
) -> DecodeReader<R, B> {
DecodeReader {
rdr: BomPeeker::new(rdr),
buf: buf,
buflen: 0,
pos: 0,
first: enc.is_none(),
last: false,
decoder: enc.map(|enc| enc.new_decoder_with_bom_removal()),
}
}
/// Fill the internal buffer from the underlying reader.
///
/// If there are unread bytes in the internal buffer, then we move them
/// to the beginning of the internal buffer and fill the remainder.
///
/// If the internal buffer is too small to read additional bytes, then an
/// error is returned.
#[inline(always)] // massive perf benefit (???)
fn fill(&mut self) -> io::Result<()> {
if self.pos < self.buflen {
if self.buflen >= self.buf.as_mut().len() {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: internal buffer exhausted"));
}
let newlen = self.buflen - self.pos;
let mut tmp = Vec::with_capacity(newlen);
tmp.extend_from_slice(&self.buf.as_mut()[self.pos..self.buflen]);
self.buf.as_mut()[..newlen].copy_from_slice(&tmp);
self.buflen = newlen;
} else {
self.buflen = 0;
}
self.pos = 0;
self.buflen +=
self.rdr.read(&mut self.buf.as_mut()[self.buflen..])?;
Ok(())
}
/// Transcode the inner stream to UTF-8 in `buf`. This assumes that there
/// is a decoder capable of transcoding the inner stream to UTF-8. This
/// returns the number of bytes written to `buf`.
///
/// When this function returns, exactly one of the following things will
/// be true:
///
/// 1. A non-zero number of bytes were written to `buf`.
/// 2. The underlying reader reached EOF.
/// 3. An error is returned: the internal buffer ran out of room.
/// 4. An I/O error occurred.
///
/// Note that `buf` must have at least 4 bytes of space.
fn transcode(&mut self, buf: &mut [u8]) -> io::Result<usize> {
assert!(buf.len() >= 4);
if self.last {
return Ok(0);
}
if self.pos >= self.buflen {
self.fill()?;
}
let mut nwrite = 0;
loop {
let (_, nin, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&self.buf.as_mut()[self.pos..self.buflen], buf, false);
self.pos += nin;
nwrite += nout;
// If we've written at least one byte to the caller-provided
// buffer, then our mission is complete.
if nwrite > 0 {
break;
}
// Otherwise, we know that our internal buffer has insufficient
// data to transcode at least one char, so we attempt to refill it.
self.fill()?;
// Quit on EOF.
if self.buflen == 0 {
self.pos = 0;
self.last = true;
let (_, _, nout, _) =
self.decoder.as_mut().unwrap().decode_to_utf8(
&[], buf, true);
return Ok(nout);
}
}
Ok(nwrite)
}
#[inline(never)] // impacts perf...
fn detect(&mut self) -> io::Result<()> {
let bom = self.rdr.peek_bom()?;
self.decoder = bom.decoder();
Ok(())
}
}
impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.first {
self.first = false;
self.detect()?;
}
if self.decoder.is_none() {
return self.rdr.read(buf);
}
// When decoding UTF-8, we need at least 4 bytes of space to guarantee
// that we can decode at least one codepoint. If we don't have it, we
// can either return `0` for the number of bytes read or return an
// error. Since `0` would be interpreted as a possibly premature EOF,
// we opt for an error.
if buf.len() < 4 {
return Err(io::Error::new(
io::ErrorKind::Other,
"DecodeReader: byte buffer must have length at least 4"));
}
self.transcode(buf)
}
}
#[cfg(test)]
mod tests {
use std::io::Read;
use encoding_rs::Encoding;
use super::{Bom, BomPeeker, DecodeReader};
fn read_to_string<R: Read>(mut rdr: R) -> String {
let mut s = String::new();
rdr.read_to_string(&mut s).unwrap();
s
}
#[test]
fn peeker_empty() {
let buf = [];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(Bom { bytes: [0; 3], len: 0}, peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one() {
let buf = [1];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 0, 0], len: 1},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_two() {
let buf = [1, 2];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 0], len: 2},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(2, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_three() {
let buf = [1, 2, 3];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_four() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
assert_eq!(
Bom { bytes: [1, 2, 3], len: 3},
peeker.peek_bom().unwrap());
let mut tmp = [0; 100];
assert_eq!(3, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(2, tmp[1]);
assert_eq!(3, tmp[2]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
assert_eq!(0, peeker.read(&mut tmp).unwrap());
}
#[test]
fn peeker_one_at_a_time() {
let buf = [1, 2, 3, 4];
let mut peeker = BomPeeker::new(&buf[..]);
let mut tmp = [0; 1];
assert_eq!(0, peeker.read(&mut tmp[..0]).unwrap());
assert_eq!(0, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(1, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(2, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(3, tmp[0]);
assert_eq!(1, peeker.read(&mut tmp).unwrap());
assert_eq!(4, tmp[0]);
}
// In cases where all we have is a bom, we expect the bytes to be
// passed through unchanged.
#[test]
fn trans_utf16_bom() {
let srcbuf = vec![0xFF, 0xFE];
let mut dstbuf = vec![0; 8 * (1<<10)];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xFE, 0xFF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
let srcbuf = vec![0xEF, 0xBB, 0xBF];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
let n = rdr.read(&mut dstbuf).unwrap();
assert_eq!(&*srcbuf, &dstbuf[..n]);
}
// Test basic UTF-16 decoding.
#[test]
fn trans_utf16_basic() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
let srcbuf = vec![0xFE, 0xFF, 0x00, 0x61];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a", read_to_string(&mut rdr));
}
// Test incomplete UTF-16 decoding. This ensures we see a replacement char
// if the stream ends with an unpaired code unit.
#[test]
fn trans_utf16_incomplete() {
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00, 0x00];
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
assert_eq!("a\u{FFFD}", read_to_string(&mut rdr));
}
macro_rules! test_trans_simple {
($name:ident, $enc:expr, $srcbytes:expr, $dst:expr) => {
#[test]
fn $name() {
let srcbuf = &$srcbytes[..];
let enc = Encoding::for_label($enc.as_bytes());
let mut rdr = DecodeReader::new(
&*srcbuf, vec![0; 8 * (1<<10)], enc);
assert_eq!($dst, read_to_string(&mut rdr));
}
}
}
// This isn't exhaustive obviously, but it lets us test base level support.
test_trans_simple!(trans_simple_auto, "does not exist", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf8, "utf-8", b"\xD0\x96", "Ж");
test_trans_simple!(trans_simple_utf16le, "utf-16le", b"\x16\x04", "Ж");
test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж");
test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж");
test_trans_simple!(
trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж");
test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж");
test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж");
test_trans_simple!(trans_simple_latin1, "latin1", b"\xA9", "©");
}
| {
let bom = self.as_slice();
if bom.len() < 3 {
return None;
}
if let Some((enc, _)) = Encoding::for_bom(bom) {
if enc != UTF_8 {
return Some(enc.new_decoder_with_bom_removal());
}
}
None
} | identifier_body |
action.go | // Copyright (c) 2022 IoTeX Foundation
// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability
// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed.
// This source code is governed by Apache License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"math/big"
"strings"
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/ioctl"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/flag"
"github.com/iotexproject/iotex-core/ioctl/newcmd/account"
"github.com/iotexproject/iotex-core/ioctl/newcmd/bc"
"github.com/iotexproject/iotex-core/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
// Multi-language support
var (
_actionCmdShorts = map[config.Language]string{
config.English: "Manage actions of IoTeX blockchain",
config.Chinese: "管理IoTex区块链的行为", // this translation
}
_infoWarn = map[config.Language]string{
config.English: "** This is an irreversible action!\n" +
"Once an account is deleted, all the assets under this account may be lost!\n" +
"Type 'YES' to continue, quit for anything else.",
config.Chinese: "** 这是一个不可逆转的操作!\n" +
"一旦一个账户被删除, 该账户下的所有资源都可能会丢失!\n" +
"输入 'YES' 以继续, 否则退出",
}
_infoQuit = map[config.Language]string{
config.English: "quit",
config.Chinese: "退出",
}
_flagGasLimitUsages = map[config.Language]string{
config.English: "set gas limit",
config.Chinese: "设置燃气上限",
}
_flagGasPriceUsages = map[config.Language]string{
config.English: `set gas price (unit: 10^(-6)IOTX), use suggested gas price if input is "0"`,
config.Chinese: `设置燃气费(单位:10^(-6)IOTX),如果输入为「0」,则使用默认燃气费`,
}
_flagNonceUsages = map[config.Language]string{
config.English: "set nonce (default using pending nonce)",
config.Chinese: "设置 nonce (默认使用 pending nonce)",
}
_flagSignerUsages = map[config.Language]string{
config.English: "choose a signing account",
config.Chinese: "选择要签名的帐户",
}
_flagBytecodeUsages = map[config.Language]string{
config.English: "set the byte code",
config.Chinese: "设置字节码",
}
_flagAssumeYesUsages = map[config.Language]string{
config.English: "answer yes for all confirmations",
config.Chinese: "为所有确认设置 yes",
}
_flagPasswordUsages = map[config.Language]string{
config.English: "input password for account",
config.Chinese: "设置密码",
}
)
// Flag label, short label and defaults
const (
gasLimitFlagLabel = "gas-limit"
gasLimitFlagShortLabel = "l"
GasLimitFlagDefault = uint64(20000000)
gasPriceFlagLabel = "gas-price"
gasPriceFlagShortLabel = "p"
gasPriceFlagDefault = "1"
nonceFlagLabel = "nonce"
nonceFlagShortLabel = "n"
nonceFlagDefault = uint64(0)
signerFlagLabel = "signer"
signerFlagShortLabel = "s"
SignerFlagDefault = ""
bytecodeFlagLabel = "bytecode"
bytecodeFlagShortLabel = "b"
bytecodeFlagDefault = ""
assumeYesFlagLabel = "assume-yes"
assumeYesFlagShortLabel = "y"
assumeYesFlagDefault = false
passwordFlagLabel = "password"
passwordFlagShortLabel = "P"
passwordFlagDefault = ""
)
func registerGasLimitFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewUint64VarP(gasLimitFlagLabel, gasLimitFlagShortLabel, GasLimitFlagDefault, selectTranslation(client, _flagGasLimitUsages)).RegisterCommand(cmd)
}
func registerGasPriceFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(gasPriceFlagLabel, gasPriceFlagShortLabel, gasPriceFlagDefault, selectTranslation(client, _flagGasPriceUsages)).RegisterCommand(cmd)
}
func registerNonceFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewUint64VarP(nonceFlagLabel, nonceFlagShortLabel, nonceFlagDefault, selectTranslation(client, _flagNonceUsages)).RegisterCommand(cmd)
}
func registerSignerFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(signerFlagLabel, signerFlagShortLabel, SignerFlagDefault, selectTranslation(client, _flagSignerUsages)).RegisterCommand(cmd)
}
func registerBytecodeFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(bytecodeFlagLabel, bytecodeFlagShortLabel, bytecodeFlagDefault, selectTranslation(client, _flagBytecodeUsages)).RegisterCommand(cmd)
} | }
func registerPasswordFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(passwordFlagLabel, passwordFlagShortLabel, passwordFlagDefault, selectTranslation(client, _flagPasswordUsages)).RegisterCommand(cmd)
}
func selectTranslation(client ioctl.Client, trls map[config.Language]string) string {
txt, _ := client.SelectTranslation(trls)
return txt
}
// NewActionCmd represents the action command
func NewActionCmd(client ioctl.Client) *cobra.Command {
cmd := &cobra.Command{
Use: "action",
Short: selectTranslation(client, _actionCmdShorts),
}
// TODO add sub commands
// cmd.AddCommand(NewActionHash(client))
// cmd.AddCommand(NewActionTransfer(client))
// cmd.AddCommand(NewActionDeploy(client))
// cmd.AddCommand(NewActionInvoke(client))
// cmd.AddCommand(NewActionRead(client))
// cmd.AddCommand(NewActionClaim(client))
// cmd.AddCommand(NewActionDeposit(client))
// cmd.AddCommand(NewActionSendRaw(client))
client.SetEndpointWithFlag(cmd.PersistentFlags().StringVar)
client.SetInsecureWithFlag(cmd.PersistentFlags().BoolVar)
return cmd
}
// RegisterWriteCommand registers action flags for command
func RegisterWriteCommand(client ioctl.Client, cmd *cobra.Command) {
registerGasLimitFlag(client, cmd)
registerGasPriceFlag(client, cmd)
registerSignerFlag(client, cmd)
registerNonceFlag(client, cmd)
registerAssumeYesFlag(client, cmd)
registerPasswordFlag(client, cmd)
}
// GetWriteCommandFlag returns action flags for command
func GetWriteCommandFlag(cmd *cobra.Command) (gasPrice, signer, password string, nonce, gasLimit uint64, assumeYes bool, err error) {
gasPrice, err = cmd.Flags().GetString(gasPriceFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag gas-price")
return
}
signer, err = cmd.Flags().GetString(signerFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag signer")
return
}
password, err = cmd.Flags().GetString(passwordFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag password")
return
}
nonce, err = cmd.Flags().GetUint64(nonceFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag nonce")
return
}
gasLimit, err = cmd.Flags().GetUint64(gasLimitFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag gas-limit")
return
}
assumeYes, err = cmd.Flags().GetBool(assumeYesFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag assume-yes")
return
}
return
}
func handleClientRequestError(err error, apiName string) error {
if sta, ok := status.FromError(err); ok {
if sta.Code() == codes.Unavailable {
return ioctl.ErrInvalidEndpointOrInsecure
}
return errors.New(sta.Message())
}
return errors.Wrapf(err, "failed to invoke %s api", apiName)
}
// Signer returns signer's address
func Signer(client ioctl.Client, signer string) (string, error) {
if util.AliasIsHdwalletKey(signer) {
return signer, nil
}
return client.AddressWithDefaultIfNotExist(signer)
}
func checkNonce(client ioctl.Client, nonce uint64, executor string) (uint64, error) {
if util.AliasIsHdwalletKey(executor) {
// for hdwallet key, get the nonce in SendAction()
return 0, nil
}
if nonce != 0 {
return nonce, nil
}
accountMeta, err := account.Meta(client, executor)
if err != nil {
return 0, errors.Wrap(err, "failed to get account meta")
}
return accountMeta.PendingNonce, nil
}
// gasPriceInRau returns the suggest gas price
func gasPriceInRau(client ioctl.Client, gasPrice string) (*big.Int, error) {
if client.IsCryptoSm2() {
return big.NewInt(0), nil
}
if len(gasPrice) != 0 {
return util.StringToRau(gasPrice, util.GasPriceDecimalNum)
}
cli, err := client.APIServiceClient()
if err != nil {
return nil, errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
rsp, err := cli.SuggestGasPrice(ctx, &iotexapi.SuggestGasPriceRequest{})
if err != nil {
return nil, handleClientRequestError(err, "SuggestGasPrice")
}
return new(big.Int).SetUint64(rsp.GasPrice), nil
}
func fixGasLimit(client ioctl.Client, caller string, execution *action.Execution) (*action.Execution, error) {
cli, err := client.APIServiceClient()
if err != nil {
return nil, errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
res, err := cli.EstimateActionGasConsumption(ctx,
&iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_Execution{
Execution: execution.Proto(),
},
CallerAddress: caller,
})
if err != nil {
return nil, handleClientRequestError(err, "EstimateActionGasConsumption")
}
return action.NewExecution(execution.Contract(), execution.Nonce(), execution.Amount(), res.Gas, execution.GasPrice(), execution.Data())
}
// SendRaw sends raw action to blockchain
func SendRaw(client ioctl.Client, cmd *cobra.Command, selp *iotextypes.Action) error {
cli, err := client.APIServiceClient()
if err != nil {
return errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
_, err = cli.SendAction(ctx, &iotexapi.SendActionRequest{Action: selp})
if err != nil {
return handleClientRequestError(err, "SendAction")
}
shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
txhash := hex.EncodeToString(shash[:])
URL := "https://"
endpoint := client.Config().Endpoint
explorer := client.Config().Explorer
switch explorer {
case "iotexscan":
if strings.Contains(endpoint, "testnet") {
URL += "testnet."
}
URL += "iotexscan.io/action/" + txhash
case "iotxplorer":
URL = "iotxplorer.io/actions/" + txhash
default:
URL = explorer + txhash
}
cmd.Printf("Action has been sent to blockchain.\nWait for several seconds and query this action by hash: %s\n", URL)
return nil
}
// SendAction sends signed action to blockchain
func SendAction(client ioctl.Client,
cmd *cobra.Command,
elp action.Envelope,
signer, password string,
nonce uint64,
assumeYes bool,
) error {
sk, err := account.PrivateKeyFromSigner(client, cmd, signer, password)
if err != nil {
return errors.Wrap(err, "failed to get privateKey")
}
chainMeta, err := bc.GetChainMeta(client)
if err != nil {
return errors.Wrap(err, "failed to get chain meta")
}
elp.SetChainID(chainMeta.GetChainID())
if util.AliasIsHdwalletKey(signer) {
addr := sk.PublicKey().Address()
signer = addr.String()
nonce, err = checkNonce(client, nonce, signer)
if err != nil {
return errors.Wrap(err, "failed to get nonce")
}
elp.SetNonce(nonce)
}
sealed, err := action.Sign(elp, sk)
if err != nil {
return errors.Wrap(err, "failed to sign action")
}
if err := isBalanceEnough(client, signer, sealed); err != nil {
return errors.Wrap(err, "failed to pass balance check")
}
selp := sealed.Proto()
sk.Zero()
actionInfo, err := printActionProto(client, selp)
if err != nil {
return errors.Wrap(err, "failed to print action proto message")
}
cmd.Println(actionInfo)
if !assumeYes {
infoWarn := selectTranslation(client, _infoWarn)
infoQuit := selectTranslation(client, _infoQuit)
confirmed, err := client.AskToConfirm(infoWarn)
if err != nil {
return errors.Wrap(err, "failed to ask confirm")
}
if !confirmed {
cmd.Println(infoQuit)
return nil
}
}
return SendRaw(client, cmd, selp)
}
// Execute sends signed execution's transaction to blockchain
func Execute(client ioctl.Client,
cmd *cobra.Command,
contract string,
amount *big.Int,
bytecode []byte,
gasPrice, signer, password string,
nonce, gasLimit uint64,
assumeYes bool,
) error {
if len(contract) == 0 && len(bytecode) == 0 {
return errors.New("failed to deploy contract with empty bytecode")
}
gasPriceRau, err := gasPriceInRau(client, gasPrice)
if err != nil {
return errors.Wrap(err, "failed to get gas price")
}
sender, err := Signer(client, signer)
if err != nil {
return errors.Wrap(err, "failed to get signer address")
}
nonce, err = checkNonce(client, nonce, sender)
if err != nil {
return errors.Wrap(err, "failed to get nonce")
}
tx, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPriceRau, bytecode)
if err != nil || tx == nil {
return errors.Wrap(err, "failed to make a Execution instance")
}
if gasLimit == 0 {
tx, err = fixGasLimit(client, sender, tx)
if err != nil || tx == nil {
return errors.Wrap(err, "failed to fix Execution gas limit")
}
gasLimit = tx.GasLimit()
}
return SendAction(
client,
cmd,
(&action.EnvelopeBuilder{}).
SetNonce(nonce).
SetGasPrice(gasPriceRau).
SetGasLimit(gasLimit).
SetAction(tx).Build(),
sender,
password,
nonce,
assumeYes,
)
}
// Read reads smart contract on IoTeX blockchain
func Read(client ioctl.Client,
contract address.Address,
amount string,
bytecode []byte,
signer string,
gasLimit uint64,
) (string, error) {
cli, err := client.APIServiceClient()
if err != nil {
return "", errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
callerAddr, err := Signer(client, signer)
if err != nil {
return "", errors.Wrap(err, "failed to get signer address")
}
if callerAddr == "" {
callerAddr = address.ZeroAddress
}
res, err := cli.ReadContract(ctx,
&iotexapi.ReadContractRequest{
Execution: &iotextypes.Execution{
Amount: amount,
Contract: contract.String(),
Data: bytecode,
},
CallerAddress: callerAddr,
GasLimit: gasLimit,
},
)
if err != nil {
return "", handleClientRequestError(err, "ReadContract")
}
return res.Data, nil
}
func isBalanceEnough(client ioctl.Client, address string, act action.SealedEnvelope) error {
accountMeta, err := account.Meta(client, address)
if err != nil {
return errors.Wrap(err, "failed to get account meta")
}
balance, ok := new(big.Int).SetString(accountMeta.Balance, 10)
if !ok {
return errors.New("failed to convert balance into big int")
}
cost, err := act.Cost()
if err != nil {
return errors.Wrap(err, "failed to check cost of an action")
}
if balance.Cmp(cost) < 0 {
return errors.New("balance is not enough")
}
return nil
} |
func registerAssumeYesFlag(client ioctl.Client, cmd *cobra.Command) {
flag.BoolVarP(assumeYesFlagLabel, assumeYesFlagShortLabel, assumeYesFlagDefault, selectTranslation(client, _flagAssumeYesUsages)).RegisterCommand(cmd) | random_line_split |
action.go | // Copyright (c) 2022 IoTeX Foundation
// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability
// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed.
// This source code is governed by Apache License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"math/big"
"strings"
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/ioctl"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/flag"
"github.com/iotexproject/iotex-core/ioctl/newcmd/account"
"github.com/iotexproject/iotex-core/ioctl/newcmd/bc"
"github.com/iotexproject/iotex-core/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
// Multi-language support
var (
_actionCmdShorts = map[config.Language]string{
config.English: "Manage actions of IoTeX blockchain",
config.Chinese: "管理IoTex区块链的行为", // this translation
}
_infoWarn = map[config.Language]string{
config.English: "** This is an irreversible action!\n" +
"Once an account is deleted, all the assets under this account may be lost!\n" +
"Type 'YES' to continue, quit for anything else.",
config.Chinese: "** 这是一个不可逆转的操作!\n" +
"一旦一个账户被删除, 该账户下的所有资源都可能会丢失!\n" +
"输入 'YES' 以继续, 否则退出",
}
_infoQuit = map[config.Language]string{
config.English: "quit",
config.Chinese: "退出",
}
_flagGasLimitUsages = map[config.Language]string{
config.English: "set gas limit",
config.Chinese: "设置燃气上限",
}
_flagGasPriceUsages = map[config.Language]string{
config.English: `set gas price (unit: 10^(-6)IOTX), use suggested gas price if input is "0"`,
config.Chinese: `设置燃气费(单位:10^(-6)IOTX),如果输入为「0」,则使用默认燃气费`,
}
_flagNonceUsages = map[config.Language]string{
config.English: "set nonce (default using pending nonce)",
config.Chinese: "设置 nonce (默认使用 pending nonce)",
}
_flagSignerUsages = map[config.Language]string{
config.English: "choose a signing account",
config.Chinese: "选择要签名的帐户",
}
_flagBytecodeUsages = map[config.Language]string{
config.English: "set the byte code",
config.Chinese: "设置字节码",
}
_flagAssumeYesUsages = map[config.Language]string{
config.English: "answer yes for all confirmations",
config.Chinese: "为所有确认设置 yes",
}
_flagPasswordUsages = map[config.Language]string{
config.English: "input password for account",
config.Chinese: "设置密码",
}
)
// Flag label, short label and defaults
const (
gasLimitFlagLabel = "gas-limit"
gasLimitFlagShortLabel = "l"
GasLimitFlagDefault = uint64(20000000)
gasPriceFlagLabel = "gas-price"
gasPriceFlagShortLabel = "p"
gasPriceFlagDefault = "1"
nonceFlagLabel = "nonce"
nonceFlagShortLabel = "n"
nonceFlagDefault = uint64(0)
signerFlagLabel = "signer"
signerFlagShortLabel = "s"
SignerFlagDefault = ""
bytecodeFlagLabel = "bytecode"
bytecodeFlagShortLabel = "b"
bytecodeFlagDefault = ""
assumeYesFlagLabel = "assume-yes"
assumeYesFlagShortLabel = "y"
assumeYesFlagDefault = false
passwordFlagLabel = "password"
passwordFlagShortLabel = "P"
passwordFlagDefault = ""
)
func registerGasLimitFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewUint64VarP(gasLimitFlagLabel, gasLimitFlagShortLabel, GasLimitFlagDefault, selectTranslation(client, _flagGasLimitUsages)).RegisterCommand(cmd)
}
func registerGasPriceFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(gasPriceFlagLabel, gasPriceFlagShortLabel, gasPriceFlagDefault, selectTranslation(client, _flagGasPriceUsages)).RegisterCommand(cmd)
}
func registerNonceFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewUint64VarP(nonceFlagLabel, nonceFlagShortLabel, nonceFlagDefault, selectTranslation(client, _flagNonceUsages)).RegisterCommand(cmd)
}
func registerSignerFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(signerFlagLabel, signerFlagShortLabel, SignerFlagDefault, selectTranslation(client, _flagSignerUsages)).RegisterCommand(cmd)
}
func registerBytecodeFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewString | gisterAssumeYesFlag(client ioctl.Client, cmd *cobra.Command) {
flag.BoolVarP(assumeYesFlagLabel, assumeYesFlagShortLabel, assumeYesFlagDefault, selectTranslation(client, _flagAssumeYesUsages)).RegisterCommand(cmd)
}
func registerPasswordFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(passwordFlagLabel, passwordFlagShortLabel, passwordFlagDefault, selectTranslation(client, _flagPasswordUsages)).RegisterCommand(cmd)
}
func selectTranslation(client ioctl.Client, trls map[config.Language]string) string {
txt, _ := client.SelectTranslation(trls)
return txt
}
// NewActionCmd represents the action command
func NewActionCmd(client ioctl.Client) *cobra.Command {
cmd := &cobra.Command{
Use: "action",
Short: selectTranslation(client, _actionCmdShorts),
}
// TODO add sub commands
// cmd.AddCommand(NewActionHash(client))
// cmd.AddCommand(NewActionTransfer(client))
// cmd.AddCommand(NewActionDeploy(client))
// cmd.AddCommand(NewActionInvoke(client))
// cmd.AddCommand(NewActionRead(client))
// cmd.AddCommand(NewActionClaim(client))
// cmd.AddCommand(NewActionDeposit(client))
// cmd.AddCommand(NewActionSendRaw(client))
client.SetEndpointWithFlag(cmd.PersistentFlags().StringVar)
client.SetInsecureWithFlag(cmd.PersistentFlags().BoolVar)
return cmd
}
// RegisterWriteCommand registers action flags for command
func RegisterWriteCommand(client ioctl.Client, cmd *cobra.Command) {
registerGasLimitFlag(client, cmd)
registerGasPriceFlag(client, cmd)
registerSignerFlag(client, cmd)
registerNonceFlag(client, cmd)
registerAssumeYesFlag(client, cmd)
registerPasswordFlag(client, cmd)
}
// GetWriteCommandFlag returns action flags for command
func GetWriteCommandFlag(cmd *cobra.Command) (gasPrice, signer, password string, nonce, gasLimit uint64, assumeYes bool, err error) {
gasPrice, err = cmd.Flags().GetString(gasPriceFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag gas-price")
return
}
signer, err = cmd.Flags().GetString(signerFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag signer")
return
}
password, err = cmd.Flags().GetString(passwordFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag password")
return
}
nonce, err = cmd.Flags().GetUint64(nonceFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag nonce")
return
}
gasLimit, err = cmd.Flags().GetUint64(gasLimitFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag gas-limit")
return
}
assumeYes, err = cmd.Flags().GetBool(assumeYesFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag assume-yes")
return
}
return
}
func handleClientRequestError(err error, apiName string) error {
if sta, ok := status.FromError(err); ok {
if sta.Code() == codes.Unavailable {
return ioctl.ErrInvalidEndpointOrInsecure
}
return errors.New(sta.Message())
}
return errors.Wrapf(err, "failed to invoke %s api", apiName)
}
// Signer returns signer's address
func Signer(client ioctl.Client, signer string) (string, error) {
if util.AliasIsHdwalletKey(signer) {
return signer, nil
}
return client.AddressWithDefaultIfNotExist(signer)
}
func checkNonce(client ioctl.Client, nonce uint64, executor string) (uint64, error) {
if util.AliasIsHdwalletKey(executor) {
// for hdwallet key, get the nonce in SendAction()
return 0, nil
}
if nonce != 0 {
return nonce, nil
}
accountMeta, err := account.Meta(client, executor)
if err != nil {
return 0, errors.Wrap(err, "failed to get account meta")
}
return accountMeta.PendingNonce, nil
}
// gasPriceInRau returns the suggest gas price
func gasPriceInRau(client ioctl.Client, gasPrice string) (*big.Int, error) {
if client.IsCryptoSm2() {
return big.NewInt(0), nil
}
if len(gasPrice) != 0 {
return util.StringToRau(gasPrice, util.GasPriceDecimalNum)
}
cli, err := client.APIServiceClient()
if err != nil {
return nil, errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
rsp, err := cli.SuggestGasPrice(ctx, &iotexapi.SuggestGasPriceRequest{})
if err != nil {
return nil, handleClientRequestError(err, "SuggestGasPrice")
}
return new(big.Int).SetUint64(rsp.GasPrice), nil
}
func fixGasLimit(client ioctl.Client, caller string, execution *action.Execution) (*action.Execution, error) {
cli, err := client.APIServiceClient()
if err != nil {
return nil, errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
res, err := cli.EstimateActionGasConsumption(ctx,
&iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_Execution{
Execution: execution.Proto(),
},
CallerAddress: caller,
})
if err != nil {
return nil, handleClientRequestError(err, "EstimateActionGasConsumption")
}
return action.NewExecution(execution.Contract(), execution.Nonce(), execution.Amount(), res.Gas, execution.GasPrice(), execution.Data())
}
// SendRaw sends raw action to blockchain
func SendRaw(client ioctl.Client, cmd *cobra.Command, selp *iotextypes.Action) error {
cli, err := client.APIServiceClient()
if err != nil {
return errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
_, err = cli.SendAction(ctx, &iotexapi.SendActionRequest{Action: selp})
if err != nil {
return handleClientRequestError(err, "SendAction")
}
shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
txhash := hex.EncodeToString(shash[:])
URL := "https://"
endpoint := client.Config().Endpoint
explorer := client.Config().Explorer
switch explorer {
case "iotexscan":
if strings.Contains(endpoint, "testnet") {
URL += "testnet."
}
URL += "iotexscan.io/action/" + txhash
case "iotxplorer":
URL = "iotxplorer.io/actions/" + txhash
default:
URL = explorer + txhash
}
cmd.Printf("Action has been sent to blockchain.\nWait for several seconds and query this action by hash: %s\n", URL)
return nil
}
// SendAction sends signed action to blockchain.
// It derives the signer's private key, signs the envelope, checks the
// account balance against the action cost, optionally asks the user for
// confirmation, and submits the result via SendRaw.
func SendAction(client ioctl.Client,
	cmd *cobra.Command,
	elp action.Envelope,
	signer, password string,
	nonce uint64,
	assumeYes bool,
) error {
	sk, err := account.PrivateKeyFromSigner(client, cmd, signer, password)
	if err != nil {
		return errors.Wrap(err, "failed to get privateKey")
	}
	// FIX: zero the private key material on every exit path. The original
	// called sk.Zero() only after a successful sign/print, leaving key
	// bytes in memory on the error returns below.
	defer sk.Zero()
	chainMeta, err := bc.GetChainMeta(client)
	if err != nil {
		return errors.Wrap(err, "failed to get chain meta")
	}
	elp.SetChainID(chainMeta.GetChainID())
	if util.AliasIsHdwalletKey(signer) {
		// Hdwallet signers only reveal their address after key derivation,
		// so resolve the real address and the nonce here.
		addr := sk.PublicKey().Address()
		signer = addr.String()
		nonce, err = checkNonce(client, nonce, signer)
		if err != nil {
			return errors.Wrap(err, "failed to get nonce")
		}
		elp.SetNonce(nonce)
	}
	sealed, err := action.Sign(elp, sk)
	if err != nil {
		return errors.Wrap(err, "failed to sign action")
	}
	if err := isBalanceEnough(client, signer, sealed); err != nil {
		return errors.Wrap(err, "failed to pass balance check")
	}
	selp := sealed.Proto()
	actionInfo, err := printActionProto(client, selp)
	if err != nil {
		return errors.Wrap(err, "failed to print action proto message")
	}
	cmd.Println(actionInfo)
	if !assumeYes {
		// Interactive confirmation unless --assume-yes was given.
		infoWarn := selectTranslation(client, _infoWarn)
		infoQuit := selectTranslation(client, _infoQuit)
		confirmed, err := client.AskToConfirm(infoWarn)
		if err != nil {
			return errors.Wrap(err, "failed to ask confirm")
		}
		if !confirmed {
			cmd.Println(infoQuit)
			return nil
		}
	}
	return SendRaw(client, cmd, selp)
}
// Execute sends signed execution's transaction to blockchain.
// It resolves gas price, signer address and nonce, builds the Execution
// (an empty contract means "deploy"), estimates the gas limit when none
// was supplied, and hands the enveloped action to SendAction.
func Execute(client ioctl.Client,
	cmd *cobra.Command,
	contract string,
	amount *big.Int,
	bytecode []byte,
	gasPrice, signer, password string,
	nonce, gasLimit uint64,
	assumeYes bool,
) error {
	if len(contract) == 0 && len(bytecode) == 0 {
		return errors.New("failed to deploy contract with empty bytecode")
	}
	gasPriceRau, err := gasPriceInRau(client, gasPrice)
	if err != nil {
		return errors.Wrap(err, "failed to get gas price")
	}
	sender, err := Signer(client, signer)
	if err != nil {
		return errors.Wrap(err, "failed to get signer address")
	}
	nonce, err = checkNonce(client, nonce, sender)
	if err != nil {
		return errors.Wrap(err, "failed to get nonce")
	}
	tx, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPriceRau, bytecode)
	if err != nil {
		return errors.Wrap(err, "failed to make a Execution instance")
	}
	if tx == nil {
		// BUG FIX: the original combined both checks as
		// `if err != nil || tx == nil { return errors.Wrap(err, ...) }`;
		// with tx == nil and err == nil, errors.Wrap(nil, ...) returns nil,
		// silently reporting success. Return a real error instead.
		return errors.New("failed to make a Execution instance")
	}
	if gasLimit == 0 {
		// Estimate the gas limit from the node when the caller left it unset.
		tx, err = fixGasLimit(client, sender, tx)
		if err != nil {
			return errors.Wrap(err, "failed to fix Execution gas limit")
		}
		if tx == nil {
			// Same nil-masking fix as above.
			return errors.New("failed to fix Execution gas limit")
		}
		gasLimit = tx.GasLimit()
	}
	return SendAction(
		client,
		cmd,
		(&action.EnvelopeBuilder{}).
			SetNonce(nonce).
			SetGasPrice(gasPriceRau).
			SetGasLimit(gasLimit).
			SetAction(tx).Build(),
		sender,
		password,
		nonce,
		assumeYes,
	)
}
// Read reads smart contract on IoTeX blockchain
func Read(client ioctl.Client,
contract address.Address,
amount string,
bytecode []byte,
signer string,
gasLimit uint64,
) (string, error) {
cli, err := client.APIServiceClient()
if err != nil {
return "", errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
callerAddr, err := Signer(client, signer)
if err != nil {
return "", errors.Wrap(err, "failed to get signer address")
}
if callerAddr == "" {
callerAddr = address.ZeroAddress
}
res, err := cli.ReadContract(ctx,
&iotexapi.ReadContractRequest{
Execution: &iotextypes.Execution{
Amount: amount,
Contract: contract.String(),
Data: bytecode,
},
CallerAddress: callerAddr,
GasLimit: gasLimit,
},
)
if err != nil {
return "", handleClientRequestError(err, "ReadContract")
}
return res.Data, nil
}
// isBalanceEnough verifies that the account at addr holds at least the
// total cost (transfer amount plus gas fee) of the sealed action.
// The parameter was renamed from "address" to "addr" so it no longer
// shadows the imported iotex-address "address" package used elsewhere
// in this file.
func isBalanceEnough(client ioctl.Client, addr string, act action.SealedEnvelope) error {
	accountMeta, err := account.Meta(client, addr)
	if err != nil {
		return errors.Wrap(err, "failed to get account meta")
	}
	// Balance arrives as a decimal string; parse it into a big.Int.
	balance, ok := new(big.Int).SetString(accountMeta.Balance, 10)
	if !ok {
		return errors.New("failed to convert balance into big int")
	}
	cost, err := act.Cost()
	if err != nil {
		return errors.Wrap(err, "failed to check cost of an action")
	}
	if balance.Cmp(cost) < 0 {
		return errors.New("balance is not enough")
	}
	return nil
}
| VarP(bytecodeFlagLabel, bytecodeFlagShortLabel, bytecodeFlagDefault, selectTranslation(client, _flagBytecodeUsages)).RegisterCommand(cmd)
}
func re | identifier_body |
action.go | // Copyright (c) 2022 IoTeX Foundation
// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability
// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed.
// This source code is governed by Apache License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"math/big"
"strings"
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/ioctl"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/flag"
"github.com/iotexproject/iotex-core/ioctl/newcmd/account"
"github.com/iotexproject/iotex-core/ioctl/newcmd/bc"
"github.com/iotexproject/iotex-core/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
// Multi-language support: user-facing strings keyed by config.Language.
// Each map holds the English and Chinese variants of one message; the
// active variant is selected at runtime via selectTranslation.
var (
	_actionCmdShorts = map[config.Language]string{
		config.English: "Manage actions of IoTeX blockchain",
		config.Chinese: "管理IoTex区块链的行为", // Chinese variant of the English short description above
	}
	// NOTE(review): this warning speaks about deleting an account, yet it is
	// shown by SendAction as the pre-send confirmation prompt — looks
	// copy-pasted from an account-delete command; confirm the wording.
	_infoWarn = map[config.Language]string{
		config.English: "** This is an irreversible action!\n" +
			"Once an account is deleted, all the assets under this account may be lost!\n" +
			"Type 'YES' to continue, quit for anything else.",
		config.Chinese: "** 这是一个不可逆转的操作!\n" +
			"一旦一个账户被删除, 该账户下的所有资源都可能会丢失!\n" +
			"输入 'YES' 以继续, 否则退出",
	}
	_infoQuit = map[config.Language]string{
		config.English: "quit",
		config.Chinese: "退出",
	}
	_flagGasLimitUsages = map[config.Language]string{
		config.English: "set gas limit",
		config.Chinese: "设置燃气上限",
	}
	_flagGasPriceUsages = map[config.Language]string{
		config.English: `set gas price (unit: 10^(-6)IOTX), use suggested gas price if input is "0"`,
		config.Chinese: `设置燃气费(单位:10^(-6)IOTX),如果输入为「0」,则使用默认燃气费`,
	}
	_flagNonceUsages = map[config.Language]string{
		config.English: "set nonce (default using pending nonce)",
		config.Chinese: "设置 nonce (默认使用 pending nonce)",
	}
	_flagSignerUsages = map[config.Language]string{
		config.English: "choose a signing account",
		config.Chinese: "选择要签名的帐户",
	}
	_flagBytecodeUsages = map[config.Language]string{
		config.English: "set the byte code",
		config.Chinese: "设置字节码",
	}
	_flagAssumeYesUsages = map[config.Language]string{
		config.English: "answer yes for all confirmations",
		config.Chinese: "为所有确认设置 yes",
	}
	_flagPasswordUsages = map[config.Language]string{
		config.English: "input password for account",
		config.Chinese: "设置密码",
	}
)
// Flag label, short label and defaults for the write-command flags
// registered below. GasLimitFlagDefault and SignerFlagDefault are
// exported — presumably referenced by other packages; verify before
// unexporting.
const (
	gasLimitFlagLabel = "gas-limit"
	gasLimitFlagShortLabel = "l"
	GasLimitFlagDefault = uint64(20000000)
	gasPriceFlagLabel = "gas-price"
	gasPriceFlagShortLabel = "p"
	gasPriceFlagDefault = "1"
	nonceFlagLabel = "nonce"
	nonceFlagShortLabel = "n"
	nonceFlagDefault = uint64(0)
	signerFlagLabel = "signer"
	signerFlagShortLabel = "s"
	SignerFlagDefault = ""
	bytecodeFlagLabel = "bytecode"
	bytecodeFlagShortLabel = "b"
	bytecodeFlagDefault = ""
	assumeYesFlagLabel = "assume-yes"
	assumeYesFlagShortLabel = "y"
	assumeYesFlagDefault = false
	passwordFlagLabel = "password"
	passwordFlagShortLabel = "P"
	passwordFlagDefault = ""
)
// registerGasLimitFlag registers the --gas-limit (-l) uint64 flag on cmd
// with a localized usage string.
func registerGasLimitFlag(client ioctl.Client, cmd *cobra.Command) {
	flag.NewUint64VarP(gasLimitFlagLabel, gasLimitFlagShortLabel, GasLimitFlagDefault, selectTranslation(client, _flagGasLimitUsages)).RegisterCommand(cmd)
}
// registerGasPriceFlag registers the --gas-price (-p) string flag on cmd
// with a localized usage string.
func registerGasPriceFlag(client ioctl.Client, cmd *cobra.Command) {
	flag.NewStringVarP(gasPriceFlagLabel, gasPriceFlagShortLabel, gasPriceFlagDefault, selectTranslation(client, _flagGasPriceUsages)).RegisterCommand(cmd)
}
func registerN | l.Client, cmd *cobra.Command) {
flag.NewUint64VarP(nonceFlagLabel, nonceFlagShortLabel, nonceFlagDefault, selectTranslation(client, _flagNonceUsages)).RegisterCommand(cmd)
}
// registerSignerFlag registers the --signer (-s) string flag on cmd
// with a localized usage string.
func registerSignerFlag(client ioctl.Client, cmd *cobra.Command) {
	flag.NewStringVarP(signerFlagLabel, signerFlagShortLabel, SignerFlagDefault, selectTranslation(client, _flagSignerUsages)).RegisterCommand(cmd)
}
// registerBytecodeFlag registers the --bytecode (-b) string flag on cmd
// with a localized usage string.
func registerBytecodeFlag(client ioctl.Client, cmd *cobra.Command) {
	flag.NewStringVarP(bytecodeFlagLabel, bytecodeFlagShortLabel, bytecodeFlagDefault, selectTranslation(client, _flagBytecodeUsages)).RegisterCommand(cmd)
}
// registerAssumeYesFlag registers the --assume-yes (-y) bool flag on cmd
// with a localized usage string.
// NOTE(review): this helper calls flag.BoolVarP while its siblings use
// flag.NewXxxVarP constructors — confirm both register equivalently.
func registerAssumeYesFlag(client ioctl.Client, cmd *cobra.Command) {
	flag.BoolVarP(assumeYesFlagLabel, assumeYesFlagShortLabel, assumeYesFlagDefault, selectTranslation(client, _flagAssumeYesUsages)).RegisterCommand(cmd)
}
// registerPasswordFlag registers the --password (-P) string flag on cmd
// with a localized usage string.
func registerPasswordFlag(client ioctl.Client, cmd *cobra.Command) {
	flag.NewStringVarP(passwordFlagLabel, passwordFlagShortLabel, passwordFlagDefault, selectTranslation(client, _flagPasswordUsages)).RegisterCommand(cmd)
}
// selectTranslation returns the variant of trls matching the client's
// configured language. The error from SelectTranslation is deliberately
// discarded; whatever text it returned is used as-is.
func selectTranslation(client ioctl.Client, trls map[config.Language]string) string {
	txt, _ := client.SelectTranslation(trls)
	return txt
}
// NewActionCmd represents the action command.
// It builds the bare "action" cobra command with a localized short
// description and wires the endpoint/insecure persistent flags; the
// subcommands listed below are not yet migrated (see TODO).
func NewActionCmd(client ioctl.Client) *cobra.Command {
	cmd := &cobra.Command{
		Use: "action",
		Short: selectTranslation(client, _actionCmdShorts),
	}
	// TODO add sub commands
	// cmd.AddCommand(NewActionHash(client))
	// cmd.AddCommand(NewActionTransfer(client))
	// cmd.AddCommand(NewActionDeploy(client))
	// cmd.AddCommand(NewActionInvoke(client))
	// cmd.AddCommand(NewActionRead(client))
	// cmd.AddCommand(NewActionClaim(client))
	// cmd.AddCommand(NewActionDeposit(client))
	// cmd.AddCommand(NewActionSendRaw(client))
	client.SetEndpointWithFlag(cmd.PersistentFlags().StringVar)
	client.SetInsecureWithFlag(cmd.PersistentFlags().BoolVar)
	return cmd
}
// RegisterWriteCommand registers action flags for command.
// It attaches the six flags every state-changing (write) action command
// needs; read them back with GetWriteCommandFlag.
func RegisterWriteCommand(client ioctl.Client, cmd *cobra.Command) {
	registerGasLimitFlag(client, cmd)
	registerGasPriceFlag(client, cmd)
	registerSignerFlag(client, cmd)
	registerNonceFlag(client, cmd)
	registerAssumeYesFlag(client, cmd)
	registerPasswordFlag(client, cmd)
}
// GetWriteCommandFlag returns action flags for command.
// It reads back the six flags registered by RegisterWriteCommand. The
// named results plus naked returns keep each lookup's error handling
// short: on the first failure the values read so far (zero for the rest)
// are returned together with a wrapped error.
func GetWriteCommandFlag(cmd *cobra.Command) (gasPrice, signer, password string, nonce, gasLimit uint64, assumeYes bool, err error) {
	gasPrice, err = cmd.Flags().GetString(gasPriceFlagLabel)
	if err != nil {
		err = errors.Wrap(err, "failed to get flag gas-price")
		return
	}
	signer, err = cmd.Flags().GetString(signerFlagLabel)
	if err != nil {
		err = errors.Wrap(err, "failed to get flag signer")
		return
	}
	password, err = cmd.Flags().GetString(passwordFlagLabel)
	if err != nil {
		err = errors.Wrap(err, "failed to get flag password")
		return
	}
	nonce, err = cmd.Flags().GetUint64(nonceFlagLabel)
	if err != nil {
		err = errors.Wrap(err, "failed to get flag nonce")
		return
	}
	gasLimit, err = cmd.Flags().GetUint64(gasLimitFlagLabel)
	if err != nil {
		err = errors.Wrap(err, "failed to get flag gas-limit")
		return
	}
	assumeYes, err = cmd.Flags().GetBool(assumeYesFlagLabel)
	if err != nil {
		err = errors.Wrap(err, "failed to get flag assume-yes")
		return
	}
	return
}
// handleClientRequestError converts an error returned by a gRPC API call
// into a user-facing one: an Unavailable status maps to the dedicated
// endpoint/insecure error, any other status keeps only its message, and
// non-status errors are wrapped with the API name.
func handleClientRequestError(err error, apiName string) error {
	sta, ok := status.FromError(err)
	if !ok {
		return errors.Wrapf(err, "failed to invoke %s api", apiName)
	}
	if sta.Code() == codes.Unavailable {
		return ioctl.ErrInvalidEndpointOrInsecure
	}
	return errors.New(sta.Message())
}
// Signer returns signer's address.
// Hdwallet aliases are passed through untouched; anything else is
// resolved (with a fallback to the default account) by the client.
func Signer(client ioctl.Client, signer string) (string, error) {
	if !util.AliasIsHdwalletKey(signer) {
		return client.AddressWithDefaultIfNotExist(signer)
	}
	return signer, nil
}
// checkNonce resolves the nonce to use for executor: hdwallet keys defer
// to SendAction(), an explicit non-zero nonce wins, and otherwise the
// account's pending nonce is fetched from the chain.
func checkNonce(client ioctl.Client, nonce uint64, executor string) (uint64, error) {
	switch {
	case util.AliasIsHdwalletKey(executor):
		// for hdwallet key, get the nonce in SendAction()
		return 0, nil
	case nonce != 0:
		return nonce, nil
	}
	meta, err := account.Meta(client, executor)
	if err != nil {
		return 0, errors.Wrap(err, "failed to get account meta")
	}
	return meta.PendingNonce, nil
}
// gasPriceInRau returns the suggest gas price.
// Resolution order: sm2-crypto chains always use 0; a non-empty gasPrice
// string is parsed as a 10^(-6) IOTX decimal; otherwise the endpoint's
// SuggestGasPrice API supplies the value.
func gasPriceInRau(client ioctl.Client, gasPrice string) (*big.Int, error) {
	if client.IsCryptoSm2() {
		return big.NewInt(0), nil
	}
	if len(gasPrice) != 0 {
		return util.StringToRau(gasPrice, util.GasPriceDecimalNum)
	}
	cli, err := client.APIServiceClient()
	if err != nil {
		return nil, errors.Wrap(err, "failed to connect to endpoint")
	}
	ctx := context.Background()
	// Attach the JWT when one is available; a JwtAuth error just means
	// "no token" and is deliberately ignored (err is scoped to the if).
	if jwtMD, err := util.JwtAuth(); err == nil {
		ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
	}
	rsp, err := cli.SuggestGasPrice(ctx, &iotexapi.SuggestGasPriceRequest{})
	if err != nil {
		return nil, handleClientRequestError(err, "SuggestGasPrice")
	}
	return new(big.Int).SetUint64(rsp.GasPrice), nil
}
// fixGasLimit rebuilds the execution with a gas limit estimated by the
// node (EstimateActionGasConsumption) in place of the caller-supplied one;
// all other fields are copied from the input execution.
func fixGasLimit(client ioctl.Client, caller string, execution *action.Execution) (*action.Execution, error) {
	cli, err := client.APIServiceClient()
	if err != nil {
		return nil, errors.Wrap(err, "failed to connect to endpoint")
	}
	ctx := context.Background()
	// Attach the JWT when one is available; errors mean "no token" and are ignored.
	if jwtMD, err := util.JwtAuth(); err == nil {
		ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
	}
	res, err := cli.EstimateActionGasConsumption(ctx,
		&iotexapi.EstimateActionGasConsumptionRequest{
			Action: &iotexapi.EstimateActionGasConsumptionRequest_Execution{
				Execution: execution.Proto(),
			},
			CallerAddress: caller,
		})
	if err != nil {
		return nil, handleClientRequestError(err, "EstimateActionGasConsumption")
	}
	return action.NewExecution(execution.Contract(), execution.Nonce(), execution.Amount(), res.Gas, execution.GasPrice(), execution.Data())
}
// SendRaw sends raw action to blockchain.
// It submits the already-signed action via the SendAction API, then prints
// an explorer URL (derived from the configured explorer and endpoint) so
// the user can look the action up by its hash.
func SendRaw(client ioctl.Client, cmd *cobra.Command, selp *iotextypes.Action) error {
	cli, err := client.APIServiceClient()
	if err != nil {
		return errors.Wrap(err, "failed to connect to endpoint")
	}
	ctx := context.Background()
	// Attach the JWT when one is available; errors mean "no token" and are ignored.
	if jwtMD, err := util.JwtAuth(); err == nil {
		ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
	}
	_, err = cli.SendAction(ctx, &iotexapi.SendActionRequest{Action: selp})
	if err != nil {
		return handleClientRequestError(err, "SendAction")
	}
	// The action hash shown to the user is the hash of the serialized proto.
	shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
	txhash := hex.EncodeToString(shash[:])
	URL := "https://"
	endpoint := client.Config().Endpoint
	explorer := client.Config().Explorer
	switch explorer {
	case "iotexscan":
		// Testnet endpoints get a "testnet." host prefix.
		if strings.Contains(endpoint, "testnet") {
			URL += "testnet."
		}
		URL += "iotexscan.io/action/" + txhash
	case "iotxplorer":
		// NOTE(review): this branch and the default overwrite URL entirely,
		// dropping the "https://" prefix built above — confirm intended.
		URL = "iotxplorer.io/actions/" + txhash
	default:
		URL = explorer + txhash
	}
	cmd.Printf("Action has been sent to blockchain.\nWait for several seconds and query this action by hash: %s\n", URL)
	return nil
}
// SendAction sends signed action to blockchain.
// It derives the signer's private key, signs the envelope, checks the
// account balance against the action cost, optionally asks the user for
// confirmation, and submits the result via SendRaw.
func SendAction(client ioctl.Client,
	cmd *cobra.Command,
	elp action.Envelope,
	signer, password string,
	nonce uint64,
	assumeYes bool,
) error {
	sk, err := account.PrivateKeyFromSigner(client, cmd, signer, password)
	if err != nil {
		return errors.Wrap(err, "failed to get privateKey")
	}
	// FIX: zero the private key material on every exit path. The original
	// called sk.Zero() only after a successful sign/print, leaving key
	// bytes in memory on the error returns below.
	defer sk.Zero()
	chainMeta, err := bc.GetChainMeta(client)
	if err != nil {
		return errors.Wrap(err, "failed to get chain meta")
	}
	elp.SetChainID(chainMeta.GetChainID())
	if util.AliasIsHdwalletKey(signer) {
		// Hdwallet signers only reveal their address after key derivation,
		// so resolve the real address and the nonce here.
		addr := sk.PublicKey().Address()
		signer = addr.String()
		nonce, err = checkNonce(client, nonce, signer)
		if err != nil {
			return errors.Wrap(err, "failed to get nonce")
		}
		elp.SetNonce(nonce)
	}
	sealed, err := action.Sign(elp, sk)
	if err != nil {
		return errors.Wrap(err, "failed to sign action")
	}
	if err := isBalanceEnough(client, signer, sealed); err != nil {
		return errors.Wrap(err, "failed to pass balance check")
	}
	selp := sealed.Proto()
	actionInfo, err := printActionProto(client, selp)
	if err != nil {
		return errors.Wrap(err, "failed to print action proto message")
	}
	cmd.Println(actionInfo)
	if !assumeYes {
		// Interactive confirmation unless --assume-yes was given.
		infoWarn := selectTranslation(client, _infoWarn)
		infoQuit := selectTranslation(client, _infoQuit)
		confirmed, err := client.AskToConfirm(infoWarn)
		if err != nil {
			return errors.Wrap(err, "failed to ask confirm")
		}
		if !confirmed {
			cmd.Println(infoQuit)
			return nil
		}
	}
	return SendRaw(client, cmd, selp)
}
// Execute sends signed execution's transaction to blockchain.
// It resolves gas price, signer address and nonce, builds the Execution
// (an empty contract means "deploy"), estimates the gas limit when none
// was supplied, and hands the enveloped action to SendAction.
func Execute(client ioctl.Client,
	cmd *cobra.Command,
	contract string,
	amount *big.Int,
	bytecode []byte,
	gasPrice, signer, password string,
	nonce, gasLimit uint64,
	assumeYes bool,
) error {
	if len(contract) == 0 && len(bytecode) == 0 {
		return errors.New("failed to deploy contract with empty bytecode")
	}
	gasPriceRau, err := gasPriceInRau(client, gasPrice)
	if err != nil {
		return errors.Wrap(err, "failed to get gas price")
	}
	sender, err := Signer(client, signer)
	if err != nil {
		return errors.Wrap(err, "failed to get signer address")
	}
	nonce, err = checkNonce(client, nonce, sender)
	if err != nil {
		return errors.Wrap(err, "failed to get nonce")
	}
	tx, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPriceRau, bytecode)
	if err != nil {
		return errors.Wrap(err, "failed to make a Execution instance")
	}
	if tx == nil {
		// BUG FIX: the original combined both checks as
		// `if err != nil || tx == nil { return errors.Wrap(err, ...) }`;
		// with tx == nil and err == nil, errors.Wrap(nil, ...) returns nil,
		// silently reporting success. Return a real error instead.
		return errors.New("failed to make a Execution instance")
	}
	if gasLimit == 0 {
		// Estimate the gas limit from the node when the caller left it unset.
		tx, err = fixGasLimit(client, sender, tx)
		if err != nil {
			return errors.Wrap(err, "failed to fix Execution gas limit")
		}
		if tx == nil {
			// Same nil-masking fix as above.
			return errors.New("failed to fix Execution gas limit")
		}
		gasLimit = tx.GasLimit()
	}
	return SendAction(
		client,
		cmd,
		(&action.EnvelopeBuilder{}).
			SetNonce(nonce).
			SetGasPrice(gasPriceRau).
			SetGasLimit(gasLimit).
			SetAction(tx).Build(),
		sender,
		password,
		nonce,
		assumeYes,
	)
}
// Read reads smart contract on IoTeX blockchain.
// It performs a read-only contract call via the ReadContract API and
// returns the raw response data; nothing is signed or sent on chain.
func Read(client ioctl.Client,
	contract address.Address,
	amount string,
	bytecode []byte,
	signer string,
	gasLimit uint64,
) (string, error) {
	cli, err := client.APIServiceClient()
	if err != nil {
		return "", errors.Wrap(err, "failed to connect to endpoint")
	}
	ctx := context.Background()
	// Attach the JWT when one is available; errors mean "no token" and are ignored.
	if jwtMD, err := util.JwtAuth(); err == nil {
		ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
	}
	callerAddr, err := Signer(client, signer)
	if err != nil {
		return "", errors.Wrap(err, "failed to get signer address")
	}
	if callerAddr == "" {
		// No signer configured: read as the zero address.
		callerAddr = address.ZeroAddress
	}
	res, err := cli.ReadContract(ctx,
		&iotexapi.ReadContractRequest{
			Execution: &iotextypes.Execution{
				Amount: amount,
				Contract: contract.String(),
				Data: bytecode,
			},
			CallerAddress: callerAddr,
			GasLimit: gasLimit,
		},
	)
	if err != nil {
		return "", handleClientRequestError(err, "ReadContract")
	}
	return res.Data, nil
}
// isBalanceEnough verifies that the account at addr holds at least the
// total cost (transfer amount plus gas fee) of the sealed action.
// The parameter was renamed from "address" to "addr" so it no longer
// shadows the imported iotex-address "address" package used elsewhere
// in this file.
func isBalanceEnough(client ioctl.Client, addr string, act action.SealedEnvelope) error {
	accountMeta, err := account.Meta(client, addr)
	if err != nil {
		return errors.Wrap(err, "failed to get account meta")
	}
	// Balance arrives as a decimal string; parse it into a big.Int.
	balance, ok := new(big.Int).SetString(accountMeta.Balance, 10)
	if !ok {
		return errors.New("failed to convert balance into big int")
	}
	cost, err := act.Cost()
	if err != nil {
		return errors.Wrap(err, "failed to check cost of an action")
	}
	if balance.Cmp(cost) < 0 {
		return errors.New("balance is not enough")
	}
	return nil
}
| onceFlag(client ioct | identifier_name |
action.go | // Copyright (c) 2022 IoTeX Foundation
// This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability
// or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed.
// This source code is governed by Apache License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"math/big"
"strings"
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/ioctl"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/flag"
"github.com/iotexproject/iotex-core/ioctl/newcmd/account"
"github.com/iotexproject/iotex-core/ioctl/newcmd/bc"
"github.com/iotexproject/iotex-core/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
// Multi-language support
var (
_actionCmdShorts = map[config.Language]string{
config.English: "Manage actions of IoTeX blockchain",
config.Chinese: "管理IoTex区块链的行为", // this translation
}
_infoWarn = map[config.Language]string{
config.English: "** This is an irreversible action!\n" +
"Once an account is deleted, all the assets under this account may be lost!\n" +
"Type 'YES' to continue, quit for anything else.",
config.Chinese: "** 这是一个不可逆转的操作!\n" +
"一旦一个账户被删除, 该账户下的所有资源都可能会丢失!\n" +
"输入 'YES' 以继续, 否则退出",
}
_infoQuit = map[config.Language]string{
config.English: "quit",
config.Chinese: "退出",
}
_flagGasLimitUsages = map[config.Language]string{
config.English: "set gas limit",
config.Chinese: "设置燃气上限",
}
_flagGasPriceUsages = map[config.Language]string{
config.English: `set gas price (unit: 10^(-6)IOTX), use suggested gas price if input is "0"`,
config.Chinese: `设置燃气费(单位:10^(-6)IOTX),如果输入为「0」,则使用默认燃气费`,
}
_flagNonceUsages = map[config.Language]string{
config.English: "set nonce (default using pending nonce)",
config.Chinese: "设置 nonce (默认使用 pending nonce)",
}
_flagSignerUsages = map[config.Language]string{
config.English: "choose a signing account",
config.Chinese: "选择要签名的帐户",
}
_flagBytecodeUsages = map[config.Language]string{
config.English: "set the byte code",
config.Chinese: "设置字节码",
}
_flagAssumeYesUsages = map[config.Language]string{
config.English: "answer yes for all confirmations",
config.Chinese: "为所有确认设置 yes",
}
_flagPasswordUsages = map[config.Language]string{
config.English: "input password for account",
config.Chinese: "设置密码",
}
)
// Flag label, short label and defaults
const (
gasLimitFlagLabel = "gas-limit"
gasLimitFlagShortLabel = "l"
GasLimitFlagDefault = uint64(20000000)
gasPriceFlagLabel = "gas-price"
gasPriceFlagShortLabel = "p"
gasPriceFlagDefault = "1"
nonceFlagLabel = "nonce"
nonceFlagShortLabel = "n"
nonceFlagDefault = uint64(0)
signerFlagLabel = "signer"
signerFlagShortLabel = "s"
SignerFlagDefault = ""
bytecodeFlagLabel = "bytecode"
bytecodeFlagShortLabel = "b"
bytecodeFlagDefault = ""
assumeYesFlagLabel = "assume-yes"
assumeYesFlagShortLabel = "y"
assumeYesFlagDefault = false
passwordFlagLabel = "password"
passwordFlagShortLabel = "P"
passwordFlagDefault = ""
)
func registerGasLimitFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewUint64VarP(gasLimitFlagLabel, gasLimitFlagShortLabel, GasLimitFlagDefault, selectTranslation(client, _flagGasLimitUsages)).RegisterCommand(cmd)
}
func registerGasPriceFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(gasPriceFlagLabel, gasPriceFlagShortLabel, gasPriceFlagDefault, selectTranslation(client, _flagGasPriceUsages)).RegisterCommand(cmd)
}
func registerNonceFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewUint64VarP(nonceFlagLabel, nonceFlagShortLabel, nonceFlagDefault, selectTranslation(client, _flagNonceUsages)).RegisterCommand(cmd)
}
func registerSignerFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(signerFlagLabel, signerFlagShortLabel, SignerFlagDefault, selectTranslation(client, _flagSignerUsages)).RegisterCommand(cmd)
}
func registerBytecodeFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(bytecodeFlagLabel, bytecodeFlagShortLabel, bytecodeFlagDefault, selectTranslation(client, _flagBytecodeUsages)).RegisterCommand(cmd)
}
func registerAssumeYesFlag(client ioctl.Client, cmd *cobra.Command) {
flag.BoolVarP(assumeYesFlagLabel, assumeYesFlagShortLabel, assumeYesFlagDefault, selectTranslation(client, _flagAssumeYesUsages)).RegisterCommand(cmd)
}
func registerPasswordFlag(client ioctl.Client, cmd *cobra.Command) {
flag.NewStringVarP(passwordFlagLabel, passwordFlagShortLabel, passwordFlagDefault, selectTranslation(client, _flagPasswordUsages)).RegisterCommand(cmd)
}
func selectTranslation(client ioctl.Client, trls map[config.Language]string) string {
txt, _ := client.SelectTranslation(trls)
return txt
}
// NewActionCmd represents the action command
func NewActionCmd(client ioctl.Client) *cobra.Command {
cmd := &cobra.Command{
Use: "action",
Short: selectTranslation(client, _actionCmdShorts),
}
// TODO add sub commands
// cmd.AddCommand(NewActionHash(client))
// cmd.AddCommand(NewActionTransfer(client))
// cmd.AddCommand(NewActionDeploy(client))
// cmd.AddCommand(NewActionInvoke(client))
// cmd.AddCommand(NewActionRead(client))
// cmd.AddCommand(NewActionClaim(client))
// cmd.AddCommand(NewActionDeposit(client))
// cmd.AddCommand(NewActionSendRaw(client))
client.SetEndpointWithFlag(cmd.PersistentFlags().StringVar)
client.SetInsecureWithFlag(cmd.PersistentFlags().BoolVar)
return cmd
}
// RegisterWriteCommand registers action flags for command
func RegisterWriteCommand(client ioctl.Client, cmd *cobra.Command) {
registerGasLimitFlag(client, cmd)
registerGasPriceFlag(client, cmd)
registerSignerFlag(client, cmd)
registerNonceFlag(client, cmd)
registerAssumeYesFlag(client, cmd)
registerPasswordFlag(client, cmd)
}
// GetWriteCommandFlag returns action flags for command
func GetWriteCommandFlag(cmd *cobra.Command) (gasPrice, signer, password string, nonce, gasLimit uint64, assumeYes bool, err error) {
gasPrice, err = cmd.Flags().GetString(gasPriceFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag gas-price")
return
}
signer, err = cmd.Flags().GetString(signerFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag signer")
return
}
password, err = cmd.Flags().GetString(passwordFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag password")
return
}
nonce, err = cmd.Flags().GetUint64(nonceFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag nonce")
return
}
gasLimit, err = cmd.Flags().GetUint64(gasLimitFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag gas-limit")
return
}
assumeYes, err = cmd.Flags().GetBool(assumeYesFlagLabel)
if err != nil {
err = errors.Wrap(err, "failed to get flag assume-yes")
return
}
return
}
func handleClientRequestError(err error, apiName string) error {
if sta, ok := status.FromError(err); ok {
if sta.Code() == codes.Unavailable {
return ioctl.ErrInvalidEndpointOrInsecure
}
return errors.New(sta.Message())
}
return errors.Wrapf(err, "failed to invoke %s api", apiName)
}
// Signer returns signer's address
func Signer(client ioctl.Client, signer string) (string, error) {
if util.AliasIsHdwalletKey(signer) {
return signer, nil
}
return client.AddressWithDefaultIfNotExist(signer)
}
func checkNonce(client ioctl.Client, nonce uint64, executor string) (uint64, error) {
if util.AliasIsHdwalletKey(executor) {
// for hdwallet key, get the nonce in SendAction()
return 0, nil
}
if nonce != 0 {
return nonce, nil
}
accountMeta, err := account.Meta(client, executor)
if err != nil {
return 0, errors.Wrap(err, "failed to get account meta")
}
return accountMeta.PendingNonce, nil
}
// gasPriceInRau returns the suggest gas price
func gasPriceInRau(client ioctl.Client, gasPrice string) (*big.Int, error) {
if client.IsCryptoSm2() {
return big.NewInt(0), nil
}
if len(gasPrice) != 0 {
return util.StringToRau(gasPrice, util.GasPriceDecimalNum)
}
cli, err := client.APIServiceClient()
if err != nil {
return nil, errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
rsp, err := cli.SuggestGasPrice(ctx, &iotexapi.SuggestGasPriceRequest{})
if err != nil {
return nil, handleClientRequestError(err, "SuggestGasPrice")
}
return new(big.Int).SetUint64(rsp.GasPrice), nil
}
func fixGasLimit(client ioctl.Client, caller string, execution *action.Execution) (*action.Execution, error) {
cli, err := client.APIServiceClient()
if err != nil {
return nil, errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
res, err := cli.EstimateActionGasConsumption(ctx,
&iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_Execution{
Execution: execution.Proto(),
},
CallerAddress: caller,
})
if err != nil {
return nil, handleClientRequestError(err, "EstimateActionGasConsumption")
}
return action.NewExecution(execution.Contract(), execution.Nonce(), execution.Amount(), res.Gas, execution.GasPrice(), execution.Data())
}
// SendRaw sends raw action to blockchain
func SendRaw(client ioctl.Client, cmd *cobra.Command, selp *iotextypes.Action) error {
cli, err := client.APIServiceClient()
if err != nil {
return errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
_, err = cli.SendAction(ctx, &iotexapi.SendActionRequest{Action: selp})
if err != nil {
return handleClientRequestError(err, "SendAction")
}
shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
txhash := hex.EncodeToString(shash[:])
URL := "https://"
endpoint := client.Config().Endpoint
explorer := client.Config().Explorer
switch explorer {
case "iotexscan":
if strings.Contains(endpoint, "testnet") {
URL += "testnet."
}
URL += "iotexscan.io/action/" + txhash
case "iotxplorer":
URL = "iotxplorer.io/actions/" + txhash
default:
URL = explorer + txhash
}
cmd.Printf("Action has been sent to blockchain.\nWait for several seconds and query this action by hash: %s\n", URL)
return nil
}
// SendAction sends signed action to blockchain.
// It derives the signer's private key, signs the envelope, checks the
// account balance against the action cost, optionally asks the user for
// confirmation, and submits the result via SendRaw.
func SendAction(client ioctl.Client,
	cmd *cobra.Command,
	elp action.Envelope,
	signer, password string,
	nonce uint64,
	assumeYes bool,
) error {
	sk, err := account.PrivateKeyFromSigner(client, cmd, signer, password)
	if err != nil {
		return errors.Wrap(err, "failed to get privateKey")
	}
	// FIX: zero the private key material on every exit path. The original
	// called sk.Zero() only after a successful sign/print, leaving key
	// bytes in memory on the error returns below.
	defer sk.Zero()
	chainMeta, err := bc.GetChainMeta(client)
	if err != nil {
		return errors.Wrap(err, "failed to get chain meta")
	}
	elp.SetChainID(chainMeta.GetChainID())
	if util.AliasIsHdwalletKey(signer) {
		// Hdwallet signers only reveal their address after key derivation,
		// so resolve the real address and the nonce here.
		addr := sk.PublicKey().Address()
		signer = addr.String()
		nonce, err = checkNonce(client, nonce, signer)
		if err != nil {
			return errors.Wrap(err, "failed to get nonce")
		}
		elp.SetNonce(nonce)
	}
	sealed, err := action.Sign(elp, sk)
	if err != nil {
		return errors.Wrap(err, "failed to sign action")
	}
	if err := isBalanceEnough(client, signer, sealed); err != nil {
		return errors.Wrap(err, "failed to pass balance check")
	}
	selp := sealed.Proto()
	actionInfo, err := printActionProto(client, selp)
	if err != nil {
		return errors.Wrap(err, "failed to print action proto message")
	}
	cmd.Println(actionInfo)
	if !assumeYes {
		// Interactive confirmation unless --assume-yes was given.
		infoWarn := selectTranslation(client, _infoWarn)
		infoQuit := selectTranslation(client, _infoQuit)
		confirmed, err := client.AskToConfirm(infoWarn)
		if err != nil {
			return errors.Wrap(err, "failed to ask confirm")
		}
		if !confirmed {
			cmd.Println(infoQuit)
			return nil
		}
	}
	return SendRaw(client, cmd, selp)
}
// Execute sends signed execution's transaction to blockchain
func Execute(client ioctl.Client,
cmd *cobra.Command,
contract string,
amount *big.Int,
bytecode []byte,
gasPrice, signer, password string,
nonce, gasLimit uint64,
assumeYes bool,
) error {
if len(contract) == 0 && len(bytecode) == 0 {
return errors.New("failed to deploy contract with empty bytecode")
}
gasPriceRau, err := gasPriceInRau(client, gasPrice)
if err != nil {
return errors.Wrap(err, "failed to get gas price")
}
sender, err := Signer(client, signer)
if err != nil {
return errors.Wrap(err, "failed to get signer address")
}
nonce, err = checkNonce(client, nonce, sender)
if err != nil {
return errors.Wrap(err, "failed to get nonce")
}
tx, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPriceRau, bytecode)
if err != nil || tx == nil {
return errors.Wrap(err, "failed to make a Execution instance")
}
if gasLimit == 0 {
tx, err = fixGasLimit(client, sender, tx)
if err != nil || tx == nil {
return errors.Wrap(err, "failed to fix Execution gas limit")
}
gasLimit = tx.GasLimit()
}
return SendAction(
client,
cmd,
(&action.EnvelopeBuilder{}).
SetNonce(nonce).
SetGasPrice(gasPriceRau).
SetGasLimit(gasLimit).
SetAction(tx).Build(),
sender,
password,
nonce,
assumeYes,
)
}
// Read reads smart contract on IoTeX blockchain
func Read(client ioctl.Client,
contract address.Address,
amount string,
bytecode []byte,
signer string,
gasLimit uint64,
) (string, error) {
cli, err := client.APIServiceClient()
if err != nil {
return "", errors.Wrap(err, "failed to connect to endpoint")
}
ctx := context.Background()
if jwtMD, err := util.JwtAuth(); err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
callerAddr, err := Signer(client, signer)
if err != nil {
return "", errors.Wrap(err, "failed to get signer address")
}
if callerAddr == "" {
callerAddr = address.ZeroAddress
}
res, err := cli.ReadContract(ctx,
&iotexapi.ReadContractRequest{
Execution: &iotextypes.Execution{
Amount: amount,
Contract: contract.String(),
Data: bytecode,
},
CallerAddress: callerAddr,
GasLimit: gasLimit,
},
)
if err != nil {
return "", handleClientRequestError(err, "ReadContract")
}
return res.Data, nil
}
func isBalanceEnough(client ioctl.Client, address string, act action.SealedEnvelope) error {
accountMeta, err := account.Meta(client, address)
if err != nil {
return errors.Wrap(err, "failed to get account meta")
}
balance, ok := new(big.Int).SetString(accountMeta.Balance, 10)
if !ok {
return errors.New("failed to convert balance into big int")
}
cost, err := act.Cost()
if err != nil {
return errors.Wrap(err, "failed to check cost of an action")
}
if balance.Cmp(cost) < 0 {
return errors.New("balance is not enough")
}
return nil
}
| conditional_block | ||
content_preservation.py | """EVALUATION OF CONTENT PRESERVATION
This code can be used for evaluation of content preservation between input and output sentiment texts of a style transfer model.
Word Mover's Distance (WMD) on texts with style masking (i.e. placeholders used in place of style words)
exhibited the highest correlation with human evaluations of the same texts.
Usage:
- Mask style words in a set of texts prior to evaluation -> mask_style_words(texts, mask_style=True)
- View correlations between automated metrics and human scores -> display_correlation_tables()
- Load WMD scores for output texts of examined style transfer models -> load_wmd_scores(...)
- Train a Word2Vec model for your dataset, for use in WMD calculation -> train_word2vec_model(...)
- Calculate WMD scores for your own input/output texts -> calculate_wmd_scores(...)
You can find examples of more detailed usage commands below.
"""
from gensim.models.word2vec import Word2Vec
from globals import MODEL_TO_PARAMS, MODEL_TO_PARAM_NAMES
from style_lexicon import load_lexicon
from tokenizer import tokenize
from utils import calculate_correlations, get_val_as_str, load_dataset, load_turk_scores, merge_datasets
import numpy as np
import math
# ASPECT = 'content_preservation'
# AUTOMATED_SCORES_PATH = '../evaluations/automated/content_preservation/sentence_level'
CUSTOM_STYLE = 'customstyle'
# STYLE_LEXICON = load_lexicon()
# STYLE_MODIFICATION_SETTINGS = ['style_masked', 'style_removed']
## DATA PREP
def mask_style_words(texts, style_tokens=None, mask_style=False):
'''
Mask or remove style words (based on a set of style tokens) from input texts.
Parameters
----------
texts : list
String inputs
style_tokens : set
Style tokens
mask_style : boolean
Set to False to remove style tokens, True to replace with placeholder
Returns
-------
edited_texts : list
Texts with style tokens masked or removed
'''
edited_texts = []
for text in texts:
tokens = tokenize(text)
edited_tokens = []
for token in tokens:
if token.lower() in style_tokens:
if mask_style:
edited_tokens.append(CUSTOM_STYLE)
else:
edited_tokens.append(token)
edited_texts.append(' '.join(edited_tokens))
return edited_texts
def generate_style_modified_texts(texts, style_lexicon):
# ensure consistent tokenization under different style modification settings
unmasked_texts = mask_style_words(texts, style_tokens={}, mask_style=False)
texts_with_style_removed = mask_style_words(texts, style_tokens=style_lexicon, mask_style=False)
texts_with_style_masked = mask_style_words(texts, style_tokens=style_lexicon, mask_style=True)
return unmasked_texts, texts_with_style_removed, texts_with_style_masked
## MODELS / SCORING OF WMD
def train_word2vec_model(texts, path):
tokenized_texts = []
for text in texts:
tokenized_texts.append(tokenize(text))
model = Word2Vec(tokenized_texts)
model.save(path)
def load_word2vec_model(path):
model = Word2Vec.load(path)
model.init_sims(replace=True) # normalize vectors
return model
def calculate_wmd_scores(references, candidates, wmd_model):
|
# def load_wmd_scores(model_name, param_val):
# '''
# Load pre-computed WMD scores for input and output texts under
# the style masking setting. (Style masking exhibited higher
# correlation with human scores than other settings).
# Parameters
# ----------
# model_name : str
# Name of style transfer model
# param_val : float
# Parameter on which the model was trained (see MODEL_TO_PARAMS for options)
# Returns
# -------
# List of WMD scores for all pairs of input and output texts
# '''
# param_name = MODEL_TO_PARAM_NAMES[model_name]
# string_val = get_val_as_str(param_val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model_name}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model_name=model_name, param_name=param_name, string_val=string_val)
# return np.load(metrics_path)['style_masked'].item()['WMD']
## CALCULATION OF CORRELATIONS
# def display_correlation_tables():
# '''
# Display correlation of automated content preservation metrics with
# averaged human evaluation scores for examined style transfer models
# over texts under different style modification settings.
# '''
# for setting in STYLE_MODIFICATION_SETTINGS:
# print()
# print('[Setting: {setting.upper()}]')
# for model in MODEL_TO_PARAMS:
# print()
# print(model)
# param_name = MODEL_TO_PARAM_NAMES[model]
# param_values = MODEL_TO_PARAMS[model]
# metrics_scores_over_model_params = {}
# turk_scores_over_model_params = []
# for val in param_values:
# string_val = get_val_as_str(val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model=model, param_name=param_name, string_val=string_val)
# all_metrics = np.load(metrics_path)
# # load scores for style modification setting
# metrics = all_metrics[setting].item()
# # aggregate scores obtained over all model parameters
# for metric_name in metrics:
# # metric_values is a list of sentence-level scores
# metric_values = metrics[metric_name]
# metrics_scores_over_model_params.setdefault(metric_name, []).extend(metric_values)
# turk_scores_over_model_params.extend(load_turk_scores(ASPECT, model, param_name, string_val))
# correlation_tables = calculate_correlations(metrics_scores_over_model_params, turk_scores_over_model_params)
# print(correlation_tables.round(decimals=3).transpose())
# print()
# # EXAMPLE USAGE (uncomment the following to play around with code)
# # load data to train models used for WMD calculations
# all_texts = load_dataset('../data/sentiment.all')
# all_texts_style_masked = mask_style_words(all_texts, mask_style=True)
# # train models
# w2v_model_path = '../models/word2vec_unmasked'
# w2v_model_style_masked_path = '../models/word2vec_masked'
# train_word2vec_model(all_texts, w2v_model_path)
# train_word2vec_model(all_texts_style_masked, w2v_model_style_masked_path)
# w2v_model = load_word2vec_model(w2v_model_path)
# w2v_model_style_masked = load_word2vec_model(w2v_model_style_masked_path)
# # load texts under different style modification settings
# input_neg_texts = load_dataset('../data/sentiment.test.0')
# input_pos_texts = load_dataset('../data/sentiment.test.1')
# input_texts = merge_datasets(input_neg_texts, input_pos_texts)
# unmasked_inputs, inputs_with_style_removed, inputs_with_style_masked = generate_style_modified_texts(input_texts)
######calculate unmasked WMD scores!
def unmasked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2 == 0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
wmd_scores = calculate_wmd_scores(reference_sentence, candidates_sentence, w2v_model)
all_wmd_scores = 0.0
nums_wms_scores = 0
for e in wmd_scores:
if not math.isinf(e):
all_wmd_scores += e
nums_wms_scores += 1
return all_wmd_scores, nums_wms_scores, all_wmd_scores/nums_wms_scores
def masked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2==0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
_, _, reference_sentence_masked = generate_style_modified_texts(reference_sentence)
_, _, candidates_sentence_masked = generate_style_modified_texts(candidates_sentence)
refs = open("refs_masked.txt", "w")
hyps = open("hyps_masked.txt", "w")
for each in reference_sentence_masked:
refs.write(each+"\n")
for each in candidates_sentence_masked:
hyps.write(each+"\n")
masked_wmd_scores = calculate_wmd_scores(reference_sentence_masked, candidates_sentence_masked, w2v_model)
all_masked_wmd_scores = 0.0
nums_masked_wmd_scores = 0
for e in masked_wmd_scores:
if not math.isinf(e):
all_masked_wmd_scores += e
nums_masked_wmd_scores += 1
return all_masked_wmd_scores, nums_masked_wmd_scores, all_masked_wmd_scores/nums_masked_wmd_scores
if __name__ == "__main__":
w2v_model_path = '../models/word2vec_masked'
sentence_path = '../evaluation_results/Yelp/val.12'
alls, num, mean = masked_toward_and_our_model_scores(w2v_model_path, sentence_path)
| '''
Calculate Word Mover's Distance for each (reference, candidate)
pair in a list of reference texts and candidate texts.
The lower the distance, the more similar the texts are.
Parameters
----------
references : list
Input texts
candidates : list
Output texts (e.g. from a style transfer model)
wmd_model : gensim.models.word2vec.Word2Vec
Trained Word2Vec model
Returns
-------
wmd_scores : list
WMD scores for all pairs
'''
wmd_scores = []
for i in range(len(references)):
wmd = wmd_model.wv.wmdistance(tokenize(references[i]), tokenize(candidates[i]))
wmd_scores.append(wmd)
return wmd_scores | identifier_body |
content_preservation.py | """EVALUATION OF CONTENT PRESERVATION
This code can be used for evaluation of content preservation between input and output sentiment texts of a style transfer model.
Word Mover's Distance (WMD) on texts with style masking (i.e. placeholders used in place of style words)
exhibited the highest correlation with human evaluations of the same texts.
Usage:
- Mask style words in a set of texts prior to evaluation -> mask_style_words(texts, mask_style=True)
- View correlations between automated metrics and human scores -> display_correlation_tables()
- Load WMD scores for output texts of examined style transfer models -> load_wmd_scores(...)
- Train a Word2Vec model for your dataset, for use in WMD calculation -> train_word2vec_model(...)
- Calculate WMD scores for your own input/output texts -> calculate_wmd_scores(...)
You can find examples of more detailed usage commands below.
"""
from gensim.models.word2vec import Word2Vec
from globals import MODEL_TO_PARAMS, MODEL_TO_PARAM_NAMES
from style_lexicon import load_lexicon
from tokenizer import tokenize
from utils import calculate_correlations, get_val_as_str, load_dataset, load_turk_scores, merge_datasets
import numpy as np
import math
# ASPECT = 'content_preservation'
# AUTOMATED_SCORES_PATH = '../evaluations/automated/content_preservation/sentence_level'
CUSTOM_STYLE = 'customstyle'
# STYLE_LEXICON = load_lexicon()
# STYLE_MODIFICATION_SETTINGS = ['style_masked', 'style_removed']
## DATA PREP
def mask_style_words(texts, style_tokens=None, mask_style=False):
'''
Mask or remove style words (based on a set of style tokens) from input texts.
Parameters
----------
texts : list
String inputs
style_tokens : set
Style tokens
mask_style : boolean
Set to False to remove style tokens, True to replace with placeholder
Returns
-------
edited_texts : list
Texts with style tokens masked or removed
'''
edited_texts = []
for text in texts:
tokens = tokenize(text)
edited_tokens = []
for token in tokens:
if token.lower() in style_tokens:
if mask_style:
edited_tokens.append(CUSTOM_STYLE)
else:
edited_tokens.append(token)
edited_texts.append(' '.join(edited_tokens))
return edited_texts
def generate_style_modified_texts(texts, style_lexicon):
# ensure consistent tokenization under different style modification settings
unmasked_texts = mask_style_words(texts, style_tokens={}, mask_style=False)
texts_with_style_removed = mask_style_words(texts, style_tokens=style_lexicon, mask_style=False)
texts_with_style_masked = mask_style_words(texts, style_tokens=style_lexicon, mask_style=True)
return unmasked_texts, texts_with_style_removed, texts_with_style_masked
## MODELS / SCORING OF WMD
def train_word2vec_model(texts, path):
tokenized_texts = []
for text in texts:
tokenized_texts.append(tokenize(text))
model = Word2Vec(tokenized_texts)
model.save(path)
def load_word2vec_model(path):
model = Word2Vec.load(path)
model.init_sims(replace=True) # normalize vectors
return model
def | (references, candidates, wmd_model):
'''
Calculate Word Mover's Distance for each (reference, candidate)
pair in a list of reference texts and candidate texts.
The lower the distance, the more similar the texts are.
Parameters
----------
references : list
Input texts
candidates : list
Output texts (e.g. from a style transfer model)
wmd_model : gensim.models.word2vec.Word2Vec
Trained Word2Vec model
Returns
-------
wmd_scores : list
WMD scores for all pairs
'''
wmd_scores = []
for i in range(len(references)):
wmd = wmd_model.wv.wmdistance(tokenize(references[i]), tokenize(candidates[i]))
wmd_scores.append(wmd)
return wmd_scores
# def load_wmd_scores(model_name, param_val):
# '''
# Load pre-computed WMD scores for input and output texts under
# the style masking setting. (Style masking exhibited higher
# correlation with human scores than other settings).
# Parameters
# ----------
# model_name : str
# Name of style transfer model
# param_val : float
# Parameter on which the model was trained (see MODEL_TO_PARAMS for options)
# Returns
# -------
# List of WMD scores for all pairs of input and output texts
# '''
# param_name = MODEL_TO_PARAM_NAMES[model_name]
# string_val = get_val_as_str(param_val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model_name}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model_name=model_name, param_name=param_name, string_val=string_val)
# return np.load(metrics_path)['style_masked'].item()['WMD']
## CALCULATION OF CORRELATIONS
# def display_correlation_tables():
# '''
# Display correlation of automated content preservation metrics with
# averaged human evaluation scores for examined style transfer models
# over texts under different style modification settings.
# '''
# for setting in STYLE_MODIFICATION_SETTINGS:
# print()
# print('[Setting: {setting.upper()}]')
# for model in MODEL_TO_PARAMS:
# print()
# print(model)
# param_name = MODEL_TO_PARAM_NAMES[model]
# param_values = MODEL_TO_PARAMS[model]
# metrics_scores_over_model_params = {}
# turk_scores_over_model_params = []
# for val in param_values:
# string_val = get_val_as_str(val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model=model, param_name=param_name, string_val=string_val)
# all_metrics = np.load(metrics_path)
# # load scores for style modification setting
# metrics = all_metrics[setting].item()
# # aggregate scores obtained over all model parameters
# for metric_name in metrics:
# # metric_values is a list of sentence-level scores
# metric_values = metrics[metric_name]
# metrics_scores_over_model_params.setdefault(metric_name, []).extend(metric_values)
# turk_scores_over_model_params.extend(load_turk_scores(ASPECT, model, param_name, string_val))
# correlation_tables = calculate_correlations(metrics_scores_over_model_params, turk_scores_over_model_params)
# print(correlation_tables.round(decimals=3).transpose())
# print()
# # EXAMPLE USAGE (uncomment the following to play around with code)
# # load data to train models used for WMD calculations
# all_texts = load_dataset('../data/sentiment.all')
# all_texts_style_masked = mask_style_words(all_texts, mask_style=True)
# # train models
# w2v_model_path = '../models/word2vec_unmasked'
# w2v_model_style_masked_path = '../models/word2vec_masked'
# train_word2vec_model(all_texts, w2v_model_path)
# train_word2vec_model(all_texts_style_masked, w2v_model_style_masked_path)
# w2v_model = load_word2vec_model(w2v_model_path)
# w2v_model_style_masked = load_word2vec_model(w2v_model_style_masked_path)
# # load texts under different style modification settings
# input_neg_texts = load_dataset('../data/sentiment.test.0')
# input_pos_texts = load_dataset('../data/sentiment.test.1')
# input_texts = merge_datasets(input_neg_texts, input_pos_texts)
# unmasked_inputs, inputs_with_style_removed, inputs_with_style_masked = generate_style_modified_texts(input_texts)
######calculate unmasked WMD scores!
def unmasked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2 == 0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
wmd_scores = calculate_wmd_scores(reference_sentence, candidates_sentence, w2v_model)
all_wmd_scores = 0.0
nums_wms_scores = 0
for e in wmd_scores:
if not math.isinf(e):
all_wmd_scores += e
nums_wms_scores += 1
return all_wmd_scores, nums_wms_scores, all_wmd_scores/nums_wms_scores
def masked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2==0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
_, _, reference_sentence_masked = generate_style_modified_texts(reference_sentence)
_, _, candidates_sentence_masked = generate_style_modified_texts(candidates_sentence)
refs = open("refs_masked.txt", "w")
hyps = open("hyps_masked.txt", "w")
for each in reference_sentence_masked:
refs.write(each+"\n")
for each in candidates_sentence_masked:
hyps.write(each+"\n")
masked_wmd_scores = calculate_wmd_scores(reference_sentence_masked, candidates_sentence_masked, w2v_model)
all_masked_wmd_scores = 0.0
nums_masked_wmd_scores = 0
for e in masked_wmd_scores:
if not math.isinf(e):
all_masked_wmd_scores += e
nums_masked_wmd_scores += 1
return all_masked_wmd_scores, nums_masked_wmd_scores, all_masked_wmd_scores/nums_masked_wmd_scores
if __name__ == "__main__":
w2v_model_path = '../models/word2vec_masked'
sentence_path = '../evaluation_results/Yelp/val.12'
alls, num, mean = masked_toward_and_our_model_scores(w2v_model_path, sentence_path)
| calculate_wmd_scores | identifier_name |
content_preservation.py | """EVALUATION OF CONTENT PRESERVATION
This code can be used for evaluation of content preservation between input and output sentiment texts of a style transfer model.
Word Mover's Distance (WMD) on texts with style masking (i.e. placeholders used in place of style words)
exhibited the highest correlation with human evaluations of the same texts.
Usage:
- Mask style words in a set of texts prior to evaluation -> mask_style_words(texts, mask_style=True)
- View correlations between automated metrics and human scores -> display_correlation_tables()
- Load WMD scores for output texts of examined style transfer models -> load_wmd_scores(...)
- Train a Word2Vec model for your dataset, for use in WMD calculation -> train_word2vec_model(...)
- Calculate WMD scores for your own input/output texts -> calculate_wmd_scores(...)
You can find examples of more detailed usage commands below.
"""
from gensim.models.word2vec import Word2Vec
from globals import MODEL_TO_PARAMS, MODEL_TO_PARAM_NAMES
from style_lexicon import load_lexicon
from tokenizer import tokenize
from utils import calculate_correlations, get_val_as_str, load_dataset, load_turk_scores, merge_datasets
import numpy as np
import math
# ASPECT = 'content_preservation'
# AUTOMATED_SCORES_PATH = '../evaluations/automated/content_preservation/sentence_level'
CUSTOM_STYLE = 'customstyle'
# STYLE_LEXICON = load_lexicon()
# STYLE_MODIFICATION_SETTINGS = ['style_masked', 'style_removed']
## DATA PREP
def mask_style_words(texts, style_tokens=None, mask_style=False):
'''
Mask or remove style words (based on a set of style tokens) from input texts.
Parameters
----------
texts : list
String inputs
style_tokens : set
Style tokens
mask_style : boolean
Set to False to remove style tokens, True to replace with placeholder
Returns
-------
edited_texts : list
Texts with style tokens masked or removed
'''
edited_texts = []
for text in texts:
tokens = tokenize(text)
edited_tokens = []
for token in tokens:
if token.lower() in style_tokens:
if mask_style:
edited_tokens.append(CUSTOM_STYLE)
else:
edited_tokens.append(token)
edited_texts.append(' '.join(edited_tokens))
return edited_texts
def generate_style_modified_texts(texts, style_lexicon):
# ensure consistent tokenization under different style modification settings
unmasked_texts = mask_style_words(texts, style_tokens={}, mask_style=False)
texts_with_style_removed = mask_style_words(texts, style_tokens=style_lexicon, mask_style=False)
texts_with_style_masked = mask_style_words(texts, style_tokens=style_lexicon, mask_style=True) | tokenized_texts = []
for text in texts:
tokenized_texts.append(tokenize(text))
model = Word2Vec(tokenized_texts)
model.save(path)
def load_word2vec_model(path):
model = Word2Vec.load(path)
model.init_sims(replace=True) # normalize vectors
return model
def calculate_wmd_scores(references, candidates, wmd_model):
'''
Calculate Word Mover's Distance for each (reference, candidate)
pair in a list of reference texts and candidate texts.
The lower the distance, the more similar the texts are.
Parameters
----------
references : list
Input texts
candidates : list
Output texts (e.g. from a style transfer model)
wmd_model : gensim.models.word2vec.Word2Vec
Trained Word2Vec model
Returns
-------
wmd_scores : list
WMD scores for all pairs
'''
wmd_scores = []
for i in range(len(references)):
wmd = wmd_model.wv.wmdistance(tokenize(references[i]), tokenize(candidates[i]))
wmd_scores.append(wmd)
return wmd_scores
# def load_wmd_scores(model_name, param_val):
# '''
# Load pre-computed WMD scores for input and output texts under
# the style masking setting. (Style masking exhibited higher
# correlation with human scores than other settings).
# Parameters
# ----------
# model_name : str
# Name of style transfer model
# param_val : float
# Parameter on which the model was trained (see MODEL_TO_PARAMS for options)
# Returns
# -------
# List of WMD scores for all pairs of input and output texts
# '''
# param_name = MODEL_TO_PARAM_NAMES[model_name]
# string_val = get_val_as_str(param_val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model_name}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model_name=model_name, param_name=param_name, string_val=string_val)
# return np.load(metrics_path)['style_masked'].item()['WMD']
## CALCULATION OF CORRELATIONS
# def display_correlation_tables():
# '''
# Display correlation of automated content preservation metrics with
# averaged human evaluation scores for examined style transfer models
# over texts under different style modification settings.
# '''
# for setting in STYLE_MODIFICATION_SETTINGS:
# print()
# print('[Setting: {setting.upper()}]')
# for model in MODEL_TO_PARAMS:
# print()
# print(model)
# param_name = MODEL_TO_PARAM_NAMES[model]
# param_values = MODEL_TO_PARAMS[model]
# metrics_scores_over_model_params = {}
# turk_scores_over_model_params = []
# for val in param_values:
# string_val = get_val_as_str(val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model=model, param_name=param_name, string_val=string_val)
# all_metrics = np.load(metrics_path)
# # load scores for style modification setting
# metrics = all_metrics[setting].item()
# # aggregate scores obtained over all model parameters
# for metric_name in metrics:
# # metric_values is a list of sentence-level scores
# metric_values = metrics[metric_name]
# metrics_scores_over_model_params.setdefault(metric_name, []).extend(metric_values)
# turk_scores_over_model_params.extend(load_turk_scores(ASPECT, model, param_name, string_val))
# correlation_tables = calculate_correlations(metrics_scores_over_model_params, turk_scores_over_model_params)
# print(correlation_tables.round(decimals=3).transpose())
# print()
# # EXAMPLE USAGE (uncomment the following to play around with code)
# # load data to train models used for WMD calculations
# all_texts = load_dataset('../data/sentiment.all')
# all_texts_style_masked = mask_style_words(all_texts, mask_style=True)
# # train models
# w2v_model_path = '../models/word2vec_unmasked'
# w2v_model_style_masked_path = '../models/word2vec_masked'
# train_word2vec_model(all_texts, w2v_model_path)
# train_word2vec_model(all_texts_style_masked, w2v_model_style_masked_path)
# w2v_model = load_word2vec_model(w2v_model_path)
# w2v_model_style_masked = load_word2vec_model(w2v_model_style_masked_path)
# # load texts under different style modification settings
# input_neg_texts = load_dataset('../data/sentiment.test.0')
# input_pos_texts = load_dataset('../data/sentiment.test.1')
# input_texts = merge_datasets(input_neg_texts, input_pos_texts)
# unmasked_inputs, inputs_with_style_removed, inputs_with_style_masked = generate_style_modified_texts(input_texts)
######calculate unmasked WMD scores!
def unmasked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2 == 0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
wmd_scores = calculate_wmd_scores(reference_sentence, candidates_sentence, w2v_model)
all_wmd_scores = 0.0
nums_wms_scores = 0
for e in wmd_scores:
if not math.isinf(e):
all_wmd_scores += e
nums_wms_scores += 1
return all_wmd_scores, nums_wms_scores, all_wmd_scores/nums_wms_scores
def masked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2==0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
_, _, reference_sentence_masked = generate_style_modified_texts(reference_sentence)
_, _, candidates_sentence_masked = generate_style_modified_texts(candidates_sentence)
refs = open("refs_masked.txt", "w")
hyps = open("hyps_masked.txt", "w")
for each in reference_sentence_masked:
refs.write(each+"\n")
for each in candidates_sentence_masked:
hyps.write(each+"\n")
masked_wmd_scores = calculate_wmd_scores(reference_sentence_masked, candidates_sentence_masked, w2v_model)
all_masked_wmd_scores = 0.0
nums_masked_wmd_scores = 0
for e in masked_wmd_scores:
if not math.isinf(e):
all_masked_wmd_scores += e
nums_masked_wmd_scores += 1
return all_masked_wmd_scores, nums_masked_wmd_scores, all_masked_wmd_scores/nums_masked_wmd_scores
if __name__ == "__main__":
w2v_model_path = '../models/word2vec_masked'
sentence_path = '../evaluation_results/Yelp/val.12'
alls, num, mean = masked_toward_and_our_model_scores(w2v_model_path, sentence_path) | return unmasked_texts, texts_with_style_removed, texts_with_style_masked
## MODELS / SCORING OF WMD
def train_word2vec_model(texts, path): | random_line_split |
content_preservation.py | """EVALUATION OF CONTENT PRESERVATION
This code can be used for evaluation of content preservation between input and output sentiment texts of a style transfer model.
Word Mover's Distance (WMD) on texts with style masking (i.e. placeholders used in place of style words)
exhibited the highest correlation with human evaluations of the same texts.
Usage:
- Mask style words in a set of texts prior to evaluation -> mask_style_words(texts, mask_style=True)
- View correlations between automated metrics and human scores -> display_correlation_tables()
- Load WMD scores for output texts of examined style transfer models -> load_wmd_scores(...)
- Train a Word2Vec model for your dataset, for use in WMD calculation -> train_word2vec_model(...)
- Calculate WMD scores for your own input/output texts -> calculate_wmd_scores(...)
You can find examples of more detailed usage commands below.
"""
from gensim.models.word2vec import Word2Vec
from globals import MODEL_TO_PARAMS, MODEL_TO_PARAM_NAMES
from style_lexicon import load_lexicon
from tokenizer import tokenize
from utils import calculate_correlations, get_val_as_str, load_dataset, load_turk_scores, merge_datasets
import numpy as np
import math
# ASPECT = 'content_preservation'
# AUTOMATED_SCORES_PATH = '../evaluations/automated/content_preservation/sentence_level'
CUSTOM_STYLE = 'customstyle'
# STYLE_LEXICON = load_lexicon()
# STYLE_MODIFICATION_SETTINGS = ['style_masked', 'style_removed']
## DATA PREP
def mask_style_words(texts, style_tokens=None, mask_style=False):
'''
Mask or remove style words (based on a set of style tokens) from input texts.
Parameters
----------
texts : list
String inputs
style_tokens : set
Style tokens
mask_style : boolean
Set to False to remove style tokens, True to replace with placeholder
Returns
-------
edited_texts : list
Texts with style tokens masked or removed
'''
edited_texts = []
for text in texts:
tokens = tokenize(text)
edited_tokens = []
for token in tokens:
|
edited_texts.append(' '.join(edited_tokens))
return edited_texts
def generate_style_modified_texts(texts, style_lexicon):
# ensure consistent tokenization under different style modification settings
unmasked_texts = mask_style_words(texts, style_tokens={}, mask_style=False)
texts_with_style_removed = mask_style_words(texts, style_tokens=style_lexicon, mask_style=False)
texts_with_style_masked = mask_style_words(texts, style_tokens=style_lexicon, mask_style=True)
return unmasked_texts, texts_with_style_removed, texts_with_style_masked
## MODELS / SCORING OF WMD
def train_word2vec_model(texts, path):
tokenized_texts = []
for text in texts:
tokenized_texts.append(tokenize(text))
model = Word2Vec(tokenized_texts)
model.save(path)
def load_word2vec_model(path):
model = Word2Vec.load(path)
model.init_sims(replace=True) # normalize vectors
return model
def calculate_wmd_scores(references, candidates, wmd_model):
'''
Calculate Word Mover's Distance for each (reference, candidate)
pair in a list of reference texts and candidate texts.
The lower the distance, the more similar the texts are.
Parameters
----------
references : list
Input texts
candidates : list
Output texts (e.g. from a style transfer model)
wmd_model : gensim.models.word2vec.Word2Vec
Trained Word2Vec model
Returns
-------
wmd_scores : list
WMD scores for all pairs
'''
wmd_scores = []
for i in range(len(references)):
wmd = wmd_model.wv.wmdistance(tokenize(references[i]), tokenize(candidates[i]))
wmd_scores.append(wmd)
return wmd_scores
# def load_wmd_scores(model_name, param_val):
# '''
# Load pre-computed WMD scores for input and output texts under
# the style masking setting. (Style masking exhibited higher
# correlation with human scores than other settings).
# Parameters
# ----------
# model_name : str
# Name of style transfer model
# param_val : float
# Parameter on which the model was trained (see MODEL_TO_PARAMS for options)
# Returns
# -------
# List of WMD scores for all pairs of input and output texts
# '''
# param_name = MODEL_TO_PARAM_NAMES[model_name]
# string_val = get_val_as_str(param_val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model_name}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model_name=model_name, param_name=param_name, string_val=string_val)
# return np.load(metrics_path)['style_masked'].item()['WMD']
## CALCULATION OF CORRELATIONS
# def display_correlation_tables():
# '''
# Display correlation of automated content preservation metrics with
# averaged human evaluation scores for examined style transfer models
# over texts under different style modification settings.
# '''
# for setting in STYLE_MODIFICATION_SETTINGS:
# print()
# print('[Setting: {setting.upper()}]')
# for model in MODEL_TO_PARAMS:
# print()
# print(model)
# param_name = MODEL_TO_PARAM_NAMES[model]
# param_values = MODEL_TO_PARAMS[model]
# metrics_scores_over_model_params = {}
# turk_scores_over_model_params = []
# for val in param_values:
# string_val = get_val_as_str(val)
# metrics_path = '{AUTOMATED_SCORES_PATH}/{model}_{param_name}_{string_val}.npz'.format(AUTOMATED_SCORES_PATH=AUTOMATED_SCORES_PATH, model=model, param_name=param_name, string_val=string_val)
# all_metrics = np.load(metrics_path)
# # load scores for style modification setting
# metrics = all_metrics[setting].item()
# # aggregate scores obtained over all model parameters
# for metric_name in metrics:
# # metric_values is a list of sentence-level scores
# metric_values = metrics[metric_name]
# metrics_scores_over_model_params.setdefault(metric_name, []).extend(metric_values)
# turk_scores_over_model_params.extend(load_turk_scores(ASPECT, model, param_name, string_val))
# correlation_tables = calculate_correlations(metrics_scores_over_model_params, turk_scores_over_model_params)
# print(correlation_tables.round(decimals=3).transpose())
# print()
# # EXAMPLE USAGE (uncomment the following to play around with code)
# # load data to train models used for WMD calculations
# all_texts = load_dataset('../data/sentiment.all')
# all_texts_style_masked = mask_style_words(all_texts, mask_style=True)
# # train models
# w2v_model_path = '../models/word2vec_unmasked'
# w2v_model_style_masked_path = '../models/word2vec_masked'
# train_word2vec_model(all_texts, w2v_model_path)
# train_word2vec_model(all_texts_style_masked, w2v_model_style_masked_path)
# w2v_model = load_word2vec_model(w2v_model_path)
# w2v_model_style_masked = load_word2vec_model(w2v_model_style_masked_path)
# # load texts under different style modification settings
# input_neg_texts = load_dataset('../data/sentiment.test.0')
# input_pos_texts = load_dataset('../data/sentiment.test.1')
# input_texts = merge_datasets(input_neg_texts, input_pos_texts)
# unmasked_inputs, inputs_with_style_removed, inputs_with_style_masked = generate_style_modified_texts(input_texts)
######calculate unmasked WMD scores!
def unmasked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2 == 0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
wmd_scores = calculate_wmd_scores(reference_sentence, candidates_sentence, w2v_model)
all_wmd_scores = 0.0
nums_wms_scores = 0
for e in wmd_scores:
if not math.isinf(e):
all_wmd_scores += e
nums_wms_scores += 1
return all_wmd_scores, nums_wms_scores, all_wmd_scores/nums_wms_scores
def masked_toward_and_our_model_scores(w2v_model_path, sentence_path):
w2v_model = load_word2vec_model(w2v_model_path)
reference_sentence = []
candidates_sentence = []
with open(sentence_path,'r') as mixed_sentence:
for i,line in enumerate(mixed_sentence):
if i % 2==0:
reference_sentence.append(line.split("\n")[0])
else:
candidates_sentence.append(line.split("\n")[0])
_, _, reference_sentence_masked = generate_style_modified_texts(reference_sentence)
_, _, candidates_sentence_masked = generate_style_modified_texts(candidates_sentence)
refs = open("refs_masked.txt", "w")
hyps = open("hyps_masked.txt", "w")
for each in reference_sentence_masked:
refs.write(each+"\n")
for each in candidates_sentence_masked:
hyps.write(each+"\n")
masked_wmd_scores = calculate_wmd_scores(reference_sentence_masked, candidates_sentence_masked, w2v_model)
all_masked_wmd_scores = 0.0
nums_masked_wmd_scores = 0
for e in masked_wmd_scores:
if not math.isinf(e):
all_masked_wmd_scores += e
nums_masked_wmd_scores += 1
return all_masked_wmd_scores, nums_masked_wmd_scores, all_masked_wmd_scores/nums_masked_wmd_scores
if __name__ == "__main__":
w2v_model_path = '../models/word2vec_masked'
sentence_path = '../evaluation_results/Yelp/val.12'
alls, num, mean = masked_toward_and_our_model_scores(w2v_model_path, sentence_path)
| if token.lower() in style_tokens:
if mask_style:
edited_tokens.append(CUSTOM_STYLE)
else:
edited_tokens.append(token) | conditional_block |
query.rs | use std::borrow::{Borrow, Cow};
use std::collections::HashMap;
use std::fmt;
use std::iter::FromIterator;
use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
use serde::de;
use serde::Deserializer;
/// Allows access to the query parameters in an url or a body.
///
/// Use one of the listed implementations below. Since those may be a bit confusing due to their
/// abundant use of generics, basically use any type of `HashMap` that maps 'str-likes' to a
/// collection of other 'str-likes'. Popular instances may be:
/// * `HashMap<String, String>`
/// * `HashMap<String, Vec<String>>`
/// * `HashMap<Cow<'static, str>, Cow<'static, str>>`
///
/// You should generally not have to implement this trait yourself, and if you do there are
/// additional requirements on your implementation to guarantee standard conformance. Therefore the
/// trait is marked as `unsafe`.
pub unsafe trait QueryParameter {
/// Get the **unique** value associated with a key.
///
/// If there are multiple values, return `None`. This is very important to guarantee
/// conformance to the RFC. Afaik it prevents potentially subverting validation middleware,
/// order dependent processing, or simple confusion between different components who parse the
/// query string from different ends.
fn unique_value(&self, key: &str) -> Option<Cow<str>>;
/// Guarantees that one can grab an owned copy.
fn normalize(&self) -> NormalizedParameter;
}
/// The query parameter normal form.
///
/// When a request wants to give access to its query or body parameters by reference, it can do so
/// by a reference of the particular trait. But when the representation of the query is not stored
/// in the memory associated with the request, it needs to be allocated to outlive the borrow on
/// the request. This allocation may as well perform the minimization/normalization into a
/// representation actually consumed by the backend. This normal form thus encapsulates the
/// associated `clone-into-normal form` by various possible constructors from references [WIP].
///
/// This gives rise to a custom `Cow<QueryParameter>` instance by requiring that normalization into
/// memory with unrelated lifetime is always possible.
///
/// Internally a hashmap but this may change due to optimizations.
#[derive(Clone, Debug, Default)]
pub struct NormalizedParameter {
/// The value is `None` if the key appeared at least twice.
inner: HashMap<Cow<'static, str>, Option<Cow<'static, str>>>,
}
unsafe impl QueryParameter for NormalizedParameter {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
self.inner
.get(key)
.and_then(|val| val.as_ref().map(Cow::as_ref).map(Cow::Borrowed))
}
fn normalize(&self) -> NormalizedParameter {
self.clone()
}
}
impl NormalizedParameter {
/// Create an empty map.
pub fn new() -> Self {
NormalizedParameter::default()
}
/// Insert a key-value-pair or mark key as dead if already present.
///
/// Since each key must appear at most once, we do not remove it from the map but instead mark
/// the key as having a duplicate entry.
pub fn insert_or_poison(&mut self, key: Cow<'static, str>, val: Cow<'static, str>) {
let unique_val = Some(val);
self.inner
.entry(key)
.and_modify(|val| *val = None)
.or_insert(unique_val);
}
}
impl Borrow<dyn QueryParameter> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + 'static) {
self
}
}
impl Borrow<dyn QueryParameter + Send> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + Send + 'static) {
self
}
}
impl<'de> de::Deserialize<'de> for NormalizedParameter {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor(NormalizedParameter);
impl<'a> de::Visitor<'a> for Visitor {
type Value = NormalizedParameter;
fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "a sequence of key-value-pairs")
}
fn visit_seq<A>(mut self, mut access: A) -> Result<Self::Value, A::Error>
where
A: de::SeqAccess<'a>,
{ | self.0.insert_or_poison(key.into(), value.into())
}
Ok(self.0)
}
}
let visitor = Visitor(NormalizedParameter::default());
deserializer.deserialize_seq(visitor)
}
}
impl<K, V> FromIterator<(K, V)> for NormalizedParameter
where
K: Into<Cow<'static, str>>,
V: Into<Cow<'static, str>>,
{
fn from_iter<T>(iter: T) -> Self
where
T: IntoIterator<Item = (K, V)>,
{
let mut target = NormalizedParameter::default();
iter.into_iter()
.for_each(|(k, v)| target.insert_or_poison(k.into(), v.into()));
target
}
}
impl ToOwned for dyn QueryParameter {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned {
self.normalize()
}
}
impl ToOwned for dyn QueryParameter + Send {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned {
self.normalize()
}
}
/// Return a reference to value in a collection if it is the only one.
///
/// For example, a vector of string like types returns a reference to its first
/// element if there are no other, else it returns `None`.
///
/// If this were done with slices, that would require choosing a particular
/// value type of the underlying slice e.g. `[String]`.
pub unsafe trait UniqueValue {
/// Borrow the unique value reference.
fn get_unique(&self) -> Option<&str>;
}
unsafe impl<K, V, S: BuildHasher> QueryParameter for HashMap<K, V, S>
where
K: Borrow<str> + Eq + Hash,
V: UniqueValue + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
self.get(key).and_then(V::get_unique).map(Cow::Borrowed)
}
fn normalize(&self) -> NormalizedParameter {
let inner = self
.iter()
.filter_map(|(key, val)| {
val.get_unique().map(|value| {
(
Cow::Owned(key.borrow().to_string()),
Some(Cow::Owned(value.to_string())),
)
})
})
.collect();
NormalizedParameter { inner }
}
}
unsafe impl<K, V> QueryParameter for Vec<(K, V)>
where
K: Borrow<str> + Eq + Hash,
V: Borrow<str> + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
let mut value = None;
for entry in self.iter() {
if entry.0.borrow() == key {
if value.is_some() {
return None;
}
value = Some(Cow::Borrowed(entry.1.borrow()));
}
}
value
}
fn normalize(&self) -> NormalizedParameter {
let mut params = NormalizedParameter::default();
self.iter()
.map(|&(ref key, ref val)| {
(
Cow::Owned(key.borrow().to_string()),
Cow::Owned(val.borrow().to_string()),
)
})
.for_each(|(key, val)| params.insert_or_poison(key, val));
params
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a mut Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl UniqueValue for str {
fn get_unique(&self) -> Option<&str> {
Some(self)
}
}
unsafe impl UniqueValue for String {
fn get_unique(&self) -> Option<&str> {
Some(&self)
}
}
unsafe impl<'a, V> UniqueValue for &'a V
where
V: AsRef<str> + ?Sized,
{
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<'a> UniqueValue for Cow<'a, str> {
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<V: UniqueValue> UniqueValue for Option<V> {
fn get_unique(&self) -> Option<&str> {
self.as_ref().and_then(V::get_unique)
}
}
unsafe impl<V: UniqueValue> UniqueValue for [V] {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 {
None
} else {
self.get(0).and_then(V::get_unique)
}
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Box<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Rc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Arc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue> UniqueValue for Vec<V> {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 {
None
} else {
self.get(0).and_then(V::get_unique)
}
}
}
mod test {
use super::*;
/// Compilation tests for various possible QueryParameter impls.
#[allow(unused)]
#[allow(dead_code)]
fn test_query_parameter_impls() {
let _ = (&HashMap::<String, String>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<&'static str, &'static str>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<Cow<'static, str>, Cow<'static, str>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Vec<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<[Cow<'static, str>]>>::new()) as &dyn QueryParameter;
}
} | while let Some((key, value)) = access.next_element::<(String, String)>()? { | random_line_split |
query.rs | use std::borrow::{Borrow, Cow};
use std::collections::HashMap;
use std::fmt;
use std::iter::FromIterator;
use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
use serde::de;
use serde::Deserializer;
/// Allows access to the query parameters in an url or a body.
///
/// Use one of the listed implementations below. Since those may be a bit confusing due to their
/// abundant use of generics, basically use any type of `HashMap` that maps 'str-likes' to a
/// collection of other 'str-likes'. Popular instances may be:
/// * `HashMap<String, String>`
/// * `HashMap<String, Vec<String>>`
/// * `HashMap<Cow<'static, str>, Cow<'static, str>>`
///
/// You should generally not have to implement this trait yourself, and if you do there are
/// additional requirements on your implementation to guarantee standard conformance. Therefore the
/// trait is marked as `unsafe`.
pub unsafe trait QueryParameter {
/// Get the **unique** value associated with a key.
///
/// If there are multiple values, return `None`. This is very important to guarantee
/// conformance to the RFC. Afaik it prevents potentially subverting validation middleware,
/// order dependent processing, or simple confusion between different components who parse the
/// query string from different ends.
fn unique_value(&self, key: &str) -> Option<Cow<str>>;
/// Guarantees that one can grab an owned copy.
fn normalize(&self) -> NormalizedParameter;
}
/// The query parameter normal form.
///
/// When a request wants to give access to its query or body parameters by reference, it can do so
/// by a reference of the particular trait. But when the representation of the query is not stored
/// in the memory associated with the request, it needs to be allocated to outlive the borrow on
/// the request. This allocation may as well perform the minimization/normalization into a
/// representation actually consumed by the backend. This normal form thus encapsulates the
/// associated `clone-into-normal form` by various possible constructors from references [WIP].
///
/// This gives rise to a custom `Cow<QueryParameter>` instance by requiring that normalization into
/// memory with unrelated lifetime is always possible.
///
/// Internally a hashmap but this may change due to optimizations.
#[derive(Clone, Debug, Default)]
pub struct NormalizedParameter {
/// The value is `None` if the key appeared at least twice.
inner: HashMap<Cow<'static, str>, Option<Cow<'static, str>>>,
}
unsafe impl QueryParameter for NormalizedParameter {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
self.inner
.get(key)
.and_then(|val| val.as_ref().map(Cow::as_ref).map(Cow::Borrowed))
}
fn normalize(&self) -> NormalizedParameter {
self.clone()
}
}
impl NormalizedParameter {
/// Create an empty map.
pub fn new() -> Self {
NormalizedParameter::default()
}
/// Insert a key-value-pair or mark key as dead if already present.
///
/// Since each key must appear at most once, we do not remove it from the map but instead mark
/// the key as having a duplicate entry.
pub fn insert_or_poison(&mut self, key: Cow<'static, str>, val: Cow<'static, str>) {
let unique_val = Some(val);
self.inner
.entry(key)
.and_modify(|val| *val = None)
.or_insert(unique_val);
}
}
impl Borrow<dyn QueryParameter> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + 'static) {
self
}
}
impl Borrow<dyn QueryParameter + Send> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + Send + 'static) {
self
}
}
impl<'de> de::Deserialize<'de> for NormalizedParameter {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor(NormalizedParameter);
impl<'a> de::Visitor<'a> for Visitor {
type Value = NormalizedParameter;
fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "a sequence of key-value-pairs")
}
fn visit_seq<A>(mut self, mut access: A) -> Result<Self::Value, A::Error>
where
A: de::SeqAccess<'a>,
{
while let Some((key, value)) = access.next_element::<(String, String)>()? {
self.0.insert_or_poison(key.into(), value.into())
}
Ok(self.0)
}
}
let visitor = Visitor(NormalizedParameter::default());
deserializer.deserialize_seq(visitor)
}
}
impl<K, V> FromIterator<(K, V)> for NormalizedParameter
where
K: Into<Cow<'static, str>>,
V: Into<Cow<'static, str>>,
{
fn from_iter<T>(iter: T) -> Self
where
T: IntoIterator<Item = (K, V)>,
{
let mut target = NormalizedParameter::default();
iter.into_iter()
.for_each(|(k, v)| target.insert_or_poison(k.into(), v.into()));
target
}
}
impl ToOwned for dyn QueryParameter {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned {
self.normalize()
}
}
impl ToOwned for dyn QueryParameter + Send {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned |
}
/// Return a reference to value in a collection if it is the only one.
///
/// For example, a vector of string like types returns a reference to its first
/// element if there are no other, else it returns `None`.
///
/// If this were done with slices, that would require choosing a particular
/// value type of the underlying slice e.g. `[String]`.
pub unsafe trait UniqueValue {
/// Borrow the unique value reference.
fn get_unique(&self) -> Option<&str>;
}
unsafe impl<K, V, S: BuildHasher> QueryParameter for HashMap<K, V, S>
where
K: Borrow<str> + Eq + Hash,
V: UniqueValue + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
self.get(key).and_then(V::get_unique).map(Cow::Borrowed)
}
fn normalize(&self) -> NormalizedParameter {
let inner = self
.iter()
.filter_map(|(key, val)| {
val.get_unique().map(|value| {
(
Cow::Owned(key.borrow().to_string()),
Some(Cow::Owned(value.to_string())),
)
})
})
.collect();
NormalizedParameter { inner }
}
}
unsafe impl<K, V> QueryParameter for Vec<(K, V)>
where
K: Borrow<str> + Eq + Hash,
V: Borrow<str> + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
let mut value = None;
for entry in self.iter() {
if entry.0.borrow() == key {
if value.is_some() {
return None;
}
value = Some(Cow::Borrowed(entry.1.borrow()));
}
}
value
}
fn normalize(&self) -> NormalizedParameter {
let mut params = NormalizedParameter::default();
self.iter()
.map(|&(ref key, ref val)| {
(
Cow::Owned(key.borrow().to_string()),
Cow::Owned(val.borrow().to_string()),
)
})
.for_each(|(key, val)| params.insert_or_poison(key, val));
params
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a mut Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl UniqueValue for str {
fn get_unique(&self) -> Option<&str> {
Some(self)
}
}
unsafe impl UniqueValue for String {
fn get_unique(&self) -> Option<&str> {
Some(&self)
}
}
unsafe impl<'a, V> UniqueValue for &'a V
where
V: AsRef<str> + ?Sized,
{
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<'a> UniqueValue for Cow<'a, str> {
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<V: UniqueValue> UniqueValue for Option<V> {
fn get_unique(&self) -> Option<&str> {
self.as_ref().and_then(V::get_unique)
}
}
unsafe impl<V: UniqueValue> UniqueValue for [V] {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 {
None
} else {
self.get(0).and_then(V::get_unique)
}
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Box<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Rc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Arc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue> UniqueValue for Vec<V> {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 {
None
} else {
self.get(0).and_then(V::get_unique)
}
}
}
mod test {
use super::*;
/// Compilation tests for various possible QueryParameter impls.
#[allow(unused)]
#[allow(dead_code)]
fn test_query_parameter_impls() {
let _ = (&HashMap::<String, String>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<&'static str, &'static str>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<Cow<'static, str>, Cow<'static, str>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Vec<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<[Cow<'static, str>]>>::new()) as &dyn QueryParameter;
}
}
| {
self.normalize()
} | identifier_body |
query.rs | use std::borrow::{Borrow, Cow};
use std::collections::HashMap;
use std::fmt;
use std::iter::FromIterator;
use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
use serde::de;
use serde::Deserializer;
/// Allows access to the query parameters in an url or a body.
///
/// Use one of the listed implementations below. Since those may be a bit confusing due to their
/// abundant use of generics, basically use any type of `HashMap` that maps 'str-likes' to a
/// collection of other 'str-likes'. Popular instances may be:
/// * `HashMap<String, String>`
/// * `HashMap<String, Vec<String>>`
/// * `HashMap<Cow<'static, str>, Cow<'static, str>>`
///
/// You should generally not have to implement this trait yourself, and if you do there are
/// additional requirements on your implementation to guarantee standard conformance. Therefore the
/// trait is marked as `unsafe`.
pub unsafe trait QueryParameter {
/// Get the **unique** value associated with a key.
///
/// If there are multiple values, return `None`. This is very important to guarantee
/// conformance to the RFC. Afaik it prevents potentially subverting validation middleware,
/// order dependent processing, or simple confusion between different components who parse the
/// query string from different ends.
fn unique_value(&self, key: &str) -> Option<Cow<str>>;
/// Guarantees that one can grab an owned copy.
fn normalize(&self) -> NormalizedParameter;
}
/// The query parameter normal form.
///
/// When a request wants to give access to its query or body parameters by reference, it can do so
/// by a reference of the particular trait. But when the representation of the query is not stored
/// in the memory associated with the request, it needs to be allocated to outlive the borrow on
/// the request. This allocation may as well perform the minimization/normalization into a
/// representation actually consumed by the backend. This normal form thus encapsulates the
/// associated `clone-into-normal form` by various possible constructors from references [WIP].
///
/// This gives rise to a custom `Cow<QueryParameter>` instance by requiring that normalization into
/// memory with unrelated lifetime is always possible.
///
/// Internally a hashmap but this may change due to optimizations.
#[derive(Clone, Debug, Default)]
pub struct NormalizedParameter {
/// The value is `None` if the key appeared at least twice.
inner: HashMap<Cow<'static, str>, Option<Cow<'static, str>>>,
}
unsafe impl QueryParameter for NormalizedParameter {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
self.inner
.get(key)
.and_then(|val| val.as_ref().map(Cow::as_ref).map(Cow::Borrowed))
}
fn normalize(&self) -> NormalizedParameter {
self.clone()
}
}
impl NormalizedParameter {
/// Create an empty map.
pub fn new() -> Self {
NormalizedParameter::default()
}
/// Insert a key-value-pair or mark key as dead if already present.
///
/// Since each key must appear at most once, we do not remove it from the map but instead mark
/// the key as having a duplicate entry.
pub fn insert_or_poison(&mut self, key: Cow<'static, str>, val: Cow<'static, str>) {
let unique_val = Some(val);
self.inner
.entry(key)
.and_modify(|val| *val = None)
.or_insert(unique_val);
}
}
impl Borrow<dyn QueryParameter> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + 'static) {
self
}
}
impl Borrow<dyn QueryParameter + Send> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + Send + 'static) {
self
}
}
impl<'de> de::Deserialize<'de> for NormalizedParameter {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor(NormalizedParameter);
impl<'a> de::Visitor<'a> for Visitor {
type Value = NormalizedParameter;
fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "a sequence of key-value-pairs")
}
fn visit_seq<A>(mut self, mut access: A) -> Result<Self::Value, A::Error>
where
A: de::SeqAccess<'a>,
{
while let Some((key, value)) = access.next_element::<(String, String)>()? {
self.0.insert_or_poison(key.into(), value.into())
}
Ok(self.0)
}
}
let visitor = Visitor(NormalizedParameter::default());
deserializer.deserialize_seq(visitor)
}
}
impl<K, V> FromIterator<(K, V)> for NormalizedParameter
where
K: Into<Cow<'static, str>>,
V: Into<Cow<'static, str>>,
{
fn from_iter<T>(iter: T) -> Self
where
T: IntoIterator<Item = (K, V)>,
{
let mut target = NormalizedParameter::default();
iter.into_iter()
.for_each(|(k, v)| target.insert_or_poison(k.into(), v.into()));
target
}
}
impl ToOwned for dyn QueryParameter {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned {
self.normalize()
}
}
impl ToOwned for dyn QueryParameter + Send {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned {
self.normalize()
}
}
/// Return a reference to value in a collection if it is the only one.
///
/// For example, a vector of string like types returns a reference to its first
/// element if there are no other, else it returns `None`.
///
/// If this were done with slices, that would require choosing a particular
/// value type of the underlying slice e.g. `[String]`.
pub unsafe trait UniqueValue {
/// Borrow the unique value reference.
fn get_unique(&self) -> Option<&str>;
}
unsafe impl<K, V, S: BuildHasher> QueryParameter for HashMap<K, V, S>
where
K: Borrow<str> + Eq + Hash,
V: UniqueValue + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
self.get(key).and_then(V::get_unique).map(Cow::Borrowed)
}
fn normalize(&self) -> NormalizedParameter {
let inner = self
.iter()
.filter_map(|(key, val)| {
val.get_unique().map(|value| {
(
Cow::Owned(key.borrow().to_string()),
Some(Cow::Owned(value.to_string())),
)
})
})
.collect();
NormalizedParameter { inner }
}
}
unsafe impl<K, V> QueryParameter for Vec<(K, V)>
where
K: Borrow<str> + Eq + Hash,
V: Borrow<str> + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
let mut value = None;
for entry in self.iter() {
if entry.0.borrow() == key {
if value.is_some() {
return None;
}
value = Some(Cow::Borrowed(entry.1.borrow()));
}
}
value
}
fn normalize(&self) -> NormalizedParameter {
let mut params = NormalizedParameter::default();
self.iter()
.map(|&(ref key, ref val)| {
(
Cow::Owned(key.borrow().to_string()),
Cow::Owned(val.borrow().to_string()),
)
})
.for_each(|(key, val)| params.insert_or_poison(key, val));
params
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a mut Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl UniqueValue for str {
fn get_unique(&self) -> Option<&str> {
Some(self)
}
}
unsafe impl UniqueValue for String {
fn get_unique(&self) -> Option<&str> {
Some(&self)
}
}
unsafe impl<'a, V> UniqueValue for &'a V
where
V: AsRef<str> + ?Sized,
{
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<'a> UniqueValue for Cow<'a, str> {
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<V: UniqueValue> UniqueValue for Option<V> {
fn get_unique(&self) -> Option<&str> {
self.as_ref().and_then(V::get_unique)
}
}
unsafe impl<V: UniqueValue> UniqueValue for [V] {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 {
None
} else {
self.get(0).and_then(V::get_unique)
}
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Box<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Rc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Arc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue> UniqueValue for Vec<V> {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 | else {
self.get(0).and_then(V::get_unique)
}
}
}
mod test {
use super::*;
/// Compilation tests for various possible QueryParameter impls.
#[allow(unused)]
#[allow(dead_code)]
fn test_query_parameter_impls() {
let _ = (&HashMap::<String, String>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<&'static str, &'static str>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<Cow<'static, str>, Cow<'static, str>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Vec<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<[Cow<'static, str>]>>::new()) as &dyn QueryParameter;
}
}
| {
None
} | conditional_block |
query.rs | use std::borrow::{Borrow, Cow};
use std::collections::HashMap;
use std::fmt;
use std::iter::FromIterator;
use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
use serde::de;
use serde::Deserializer;
/// Allows access to the query parameters in an url or a body.
///
/// Use one of the listed implementations below. Since those may be a bit confusing due to their
/// abundant use of generics, basically use any type of `HashMap` that maps 'str-likes' to a
/// collection of other 'str-likes'. Popular instances may be:
/// * `HashMap<String, String>`
/// * `HashMap<String, Vec<String>>`
/// * `HashMap<Cow<'static, str>, Cow<'static, str>>`
///
/// You should generally not have to implement this trait yourself, and if you do there are
/// additional requirements on your implementation to guarantee standard conformance. Therefore the
/// trait is marked as `unsafe`.
pub unsafe trait QueryParameter {
/// Get the **unique** value associated with a key.
///
/// If there are multiple values, return `None`. This is very important to guarantee
/// conformance to the RFC. Afaik it prevents potentially subverting validation middleware,
/// order dependent processing, or simple confusion between different components who parse the
/// query string from different ends.
fn unique_value(&self, key: &str) -> Option<Cow<str>>;
/// Guarantees that one can grab an owned copy.
fn normalize(&self) -> NormalizedParameter;
}
/// The query parameter normal form.
///
/// When a request wants to give access to its query or body parameters by reference, it can do so
/// by a reference of the particular trait. But when the representation of the query is not stored
/// in the memory associated with the request, it needs to be allocated to outlive the borrow on
/// the request. This allocation may as well perform the minimization/normalization into a
/// representation actually consumed by the backend. This normal form thus encapsulates the
/// associated `clone-into-normal form` by various possible constructors from references [WIP].
///
/// This gives rise to a custom `Cow<QueryParameter>` instance by requiring that normalization into
/// memory with unrelated lifetime is always possible.
///
/// Internally a hashmap but this may change due to optimizations.
#[derive(Clone, Debug, Default)]
pub struct NormalizedParameter {
/// The value is `None` if the key appeared at least twice.
inner: HashMap<Cow<'static, str>, Option<Cow<'static, str>>>,
}
unsafe impl QueryParameter for NormalizedParameter {
fn | (&self, key: &str) -> Option<Cow<str>> {
self.inner
.get(key)
.and_then(|val| val.as_ref().map(Cow::as_ref).map(Cow::Borrowed))
}
fn normalize(&self) -> NormalizedParameter {
self.clone()
}
}
impl NormalizedParameter {
/// Create an empty map.
pub fn new() -> Self {
NormalizedParameter::default()
}
/// Insert a key-value-pair or mark key as dead if already present.
///
/// Since each key must appear at most once, we do not remove it from the map but instead mark
/// the key as having a duplicate entry.
pub fn insert_or_poison(&mut self, key: Cow<'static, str>, val: Cow<'static, str>) {
let unique_val = Some(val);
self.inner
.entry(key)
.and_modify(|val| *val = None)
.or_insert(unique_val);
}
}
impl Borrow<dyn QueryParameter> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + 'static) {
self
}
}
impl Borrow<dyn QueryParameter + Send> for NormalizedParameter {
fn borrow(&self) -> &(dyn QueryParameter + Send + 'static) {
self
}
}
impl<'de> de::Deserialize<'de> for NormalizedParameter {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor(NormalizedParameter);
impl<'a> de::Visitor<'a> for Visitor {
type Value = NormalizedParameter;
fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "a sequence of key-value-pairs")
}
fn visit_seq<A>(mut self, mut access: A) -> Result<Self::Value, A::Error>
where
A: de::SeqAccess<'a>,
{
while let Some((key, value)) = access.next_element::<(String, String)>()? {
self.0.insert_or_poison(key.into(), value.into())
}
Ok(self.0)
}
}
let visitor = Visitor(NormalizedParameter::default());
deserializer.deserialize_seq(visitor)
}
}
impl<K, V> FromIterator<(K, V)> for NormalizedParameter
where
K: Into<Cow<'static, str>>,
V: Into<Cow<'static, str>>,
{
fn from_iter<T>(iter: T) -> Self
where
T: IntoIterator<Item = (K, V)>,
{
let mut target = NormalizedParameter::default();
iter.into_iter()
.for_each(|(k, v)| target.insert_or_poison(k.into(), v.into()));
target
}
}
impl ToOwned for dyn QueryParameter {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned {
self.normalize()
}
}
impl ToOwned for dyn QueryParameter + Send {
type Owned = NormalizedParameter;
fn to_owned(&self) -> Self::Owned {
self.normalize()
}
}
/// Return a reference to value in a collection if it is the only one.
///
/// For example, a vector of string like types returns a reference to its first
/// element if there are no other, else it returns `None`.
///
/// If this were done with slices, that would require choosing a particular
/// value type of the underlying slice e.g. `[String]`.
pub unsafe trait UniqueValue {
/// Borrow the unique value reference.
fn get_unique(&self) -> Option<&str>;
}
unsafe impl<K, V, S: BuildHasher> QueryParameter for HashMap<K, V, S>
where
K: Borrow<str> + Eq + Hash,
V: UniqueValue + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
self.get(key).and_then(V::get_unique).map(Cow::Borrowed)
}
fn normalize(&self) -> NormalizedParameter {
let inner = self
.iter()
.filter_map(|(key, val)| {
val.get_unique().map(|value| {
(
Cow::Owned(key.borrow().to_string()),
Some(Cow::Owned(value.to_string())),
)
})
})
.collect();
NormalizedParameter { inner }
}
}
unsafe impl<K, V> QueryParameter for Vec<(K, V)>
where
K: Borrow<str> + Eq + Hash,
V: Borrow<str> + Eq + Hash,
{
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
let mut value = None;
for entry in self.iter() {
if entry.0.borrow() == key {
if value.is_some() {
return None;
}
value = Some(Cow::Borrowed(entry.1.borrow()));
}
}
value
}
fn normalize(&self) -> NormalizedParameter {
let mut params = NormalizedParameter::default();
self.iter()
.map(|&(ref key, ref val)| {
(
Cow::Owned(key.borrow().to_string()),
Cow::Owned(val.borrow().to_string()),
)
})
.for_each(|(key, val)| params.insert_or_poison(key, val));
params
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a mut Q {
fn unique_value(&self, key: &str) -> Option<Cow<str>> {
(**self).unique_value(key)
}
fn normalize(&self) -> NormalizedParameter {
(**self).normalize()
}
}
unsafe impl UniqueValue for str {
fn get_unique(&self) -> Option<&str> {
Some(self)
}
}
unsafe impl UniqueValue for String {
fn get_unique(&self) -> Option<&str> {
Some(&self)
}
}
unsafe impl<'a, V> UniqueValue for &'a V
where
V: AsRef<str> + ?Sized,
{
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<'a> UniqueValue for Cow<'a, str> {
fn get_unique(&self) -> Option<&str> {
Some(self.as_ref())
}
}
unsafe impl<V: UniqueValue> UniqueValue for Option<V> {
fn get_unique(&self) -> Option<&str> {
self.as_ref().and_then(V::get_unique)
}
}
unsafe impl<V: UniqueValue> UniqueValue for [V] {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 {
None
} else {
self.get(0).and_then(V::get_unique)
}
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Box<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Rc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Arc<V> {
fn get_unique(&self) -> Option<&str> {
(**self).get_unique()
}
}
unsafe impl<V: UniqueValue> UniqueValue for Vec<V> {
fn get_unique(&self) -> Option<&str> {
if self.len() > 1 {
None
} else {
self.get(0).and_then(V::get_unique)
}
}
}
mod test {
use super::*;
/// Compilation tests for various possible QueryParameter impls.
#[allow(unused)]
#[allow(dead_code)]
fn test_query_parameter_impls() {
let _ = (&HashMap::<String, String>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<&'static str, &'static str>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<Cow<'static, str>, Cow<'static, str>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Vec<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<String>>::new()) as &dyn QueryParameter;
let _ = (&HashMap::<String, Box<[Cow<'static, str>]>>::new()) as &dyn QueryParameter;
}
}
| unique_value | identifier_name |
Graph.ts | import * as acorn from 'acorn';
import injectClassFields from 'acorn-class-fields';
import injectExportNsFrom from 'acorn-export-ns-from';
import injectImportMeta from 'acorn-import-meta';
import injectStaticClassFeatures from 'acorn-static-class-features';
import GlobalScope from './ast/scopes/GlobalScope';
import { PathTracker } from './ast/utils/PathTracker';
import Chunk from './Chunk';
import ExternalModule from './ExternalModule';
import Module, { defaultAcornOptions } from './Module';
import { ModuleLoader, UnresolvedModule } from './ModuleLoader';
import {
GetManualChunk,
InputOptions,
IsExternal,
ManualChunksOption,
ModuleJSON,
RollupCache,
RollupWarning,
RollupWatcher,
SerializablePluginCache,
TreeshakingOptions,
WarningHandler
} from './rollup/types';
import { BuildPhase } from './utils/buildPhase';
import { getChunkAssignments } from './utils/chunkAssignment';
import { errDeprecation, error } from './utils/error';
import { analyseModuleExecution, sortByExecutionOrder } from './utils/executionOrder';
import { resolve } from './utils/path';
import { PluginDriver } from './utils/PluginDriver';
import relativeId from './utils/relativeId';
import { timeEnd, timeStart } from './utils/timers';
function normalizeEntryModules(
entryModules: string | string[] | Record<string, string>
): UnresolvedModule[] {
if (typeof entryModules === 'string') {
return [{ fileName: null, name: null, id: entryModules, importer: undefined }];
}
if (Array.isArray(entryModules)) {
return entryModules.map(id => ({ fileName: null, name: null, id, importer: undefined }));
}
return Object.keys(entryModules).map(name => ({
fileName: null,
id: entryModules[name],
importer: undefined,
name
}));
}
export default class Graph {
acornOptions: acorn.Options;
acornParser: typeof acorn.Parser;
cachedModules: Map<string, ModuleJSON>;
contextParse: (code: string, acornOptions?: acorn.Options) => acorn.Node;
deoptimizationTracker: PathTracker;
getModuleContext: (id: string) => string;
moduleById = new Map<string, Module | ExternalModule>();
moduleLoader: ModuleLoader;
needsTreeshakingPass = false;
phase: BuildPhase = BuildPhase.LOAD_AND_PARSE;
pluginDriver: PluginDriver;
preserveModules: boolean;
scope: GlobalScope;
shimMissingExports: boolean;
treeshakingOptions?: TreeshakingOptions;
watchFiles: Record<string, true> = Object.create(null);
private cacheExpiry: number;
private context: string;
private externalModules: ExternalModule[] = [];
private modules: Module[] = [];
private onwarn: WarningHandler;
private pluginCache?: Record<string, SerializablePluginCache>;
private strictDeprecations: boolean;
constructor(options: InputOptions, watcher: RollupWatcher | null) {
this.onwarn = options.onwarn as WarningHandler;
this.deoptimizationTracker = new PathTracker();
this.cachedModules = new Map();
if (options.cache) {
if (options.cache.modules)
for (const module of options.cache.modules) this.cachedModules.set(module.id, module);
}
if (options.cache !== false) {
this.pluginCache = (options.cache && options.cache.plugins) || Object.create(null);
// increment access counter
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
for (const key of Object.keys(cache)) cache[key][0]++;
}
}
this.preserveModules = options.preserveModules!;
this.strictDeprecations = options.strictDeprecations!;
this.cacheExpiry = options.experimentalCacheExpiry!;
if (options.treeshake !== false) {
this.treeshakingOptions =
options.treeshake && options.treeshake !== true
? {
annotations: options.treeshake.annotations !== false,
moduleSideEffects: options.treeshake.moduleSideEffects,
propertyReadSideEffects: options.treeshake.propertyReadSideEffects !== false,
pureExternalModules: options.treeshake.pureExternalModules,
tryCatchDeoptimization: options.treeshake.tryCatchDeoptimization !== false,
unknownGlobalSideEffects: options.treeshake.unknownGlobalSideEffects !== false
}
: {
annotations: true,
moduleSideEffects: true,
propertyReadSideEffects: true,
tryCatchDeoptimization: true,
unknownGlobalSideEffects: true
};
if (typeof this.treeshakingOptions.pureExternalModules !== 'undefined') {
this.warnDeprecation(
`The "treeshake.pureExternalModules" option is deprecated. The "treeshake.moduleSideEffects" option should be used instead. "treeshake.pureExternalModules: true" is equivalent to "treeshake.moduleSideEffects: 'no-external'"`,
true
);
}
}
this.contextParse = (code: string, options: acorn.Options = {}) =>
this.acornParser.parse(code, {
...defaultAcornOptions,
...options,
...this.acornOptions
});
this.pluginDriver = new PluginDriver(this, options.plugins!, this.pluginCache);
if (watcher) {
const handleChange = (id: string) => this.pluginDriver.hookSeqSync('watchChange', [id]);
watcher.on('change', handleChange);
watcher.once('restart', () => {
watcher.removeListener('change', handleChange);
});
}
this.shimMissingExports = options.shimMissingExports as boolean;
this.scope = new GlobalScope();
this.context = String(options.context);
const optionsModuleContext = options.moduleContext;
if (typeof optionsModuleContext === 'function') {
this.getModuleContext = id => optionsModuleContext(id) || this.context;
} else if (typeof optionsModuleContext === 'object') {
const moduleContext = new Map();
for (const key in optionsModuleContext) {
moduleContext.set(resolve(key), optionsModuleContext[key]);
}
this.getModuleContext = id => moduleContext.get(id) || this.context;
} else {
this.getModuleContext = () => this.context;
}
this.acornOptions = options.acorn ? { ...options.acorn } : {};
const acornPluginsToInject: Function[] = [];
acornPluginsToInject.push(
injectImportMeta,
injectExportNsFrom,
injectClassFields,
injectStaticClassFeatures
);
(this.acornOptions as any).allowAwaitOutsideFunction = true;
const acornInjectPlugins = options.acornInjectPlugins;
acornPluginsToInject.push(
...(Array.isArray(acornInjectPlugins)
? acornInjectPlugins
: acornInjectPlugins
? [acornInjectPlugins]
: [])
);
this.acornParser = acorn.Parser.extend(...(acornPluginsToInject as any));
this.moduleLoader = new ModuleLoader(
this,
this.moduleById,
this.pluginDriver,
options.preserveSymlinks === true,
options.external as (string | RegExp)[] | IsExternal,
(typeof options.manualChunks === 'function' && options.manualChunks) as GetManualChunk | null,
(this.treeshakingOptions ? this.treeshakingOptions.moduleSideEffects : null)!,
(this.treeshakingOptions ? this.treeshakingOptions.pureExternalModules : false)!
);
}
build(
entryModules: string | string[] | Record<string, string>,
manualChunks: ManualChunksOption | void,
inlineDynamicImports: boolean
): Promise<Chunk[]> {
// Phase 1 – discovery. We load the entry module and find which
// modules it imports, and import those, until we have all
// of the entry module's dependencies
timeStart('parse modules', 2);
return Promise.all([
this.moduleLoader.addEntryModules(normalizeEntryModules(entryModules), true),
(manualChunks &&
typeof manualChunks === 'object' &&
this.moduleLoader.addManualChunks(manualChunks)) as Promise<void>
]).then(([{ entryModules, manualChunkModulesByAlias }]) => {
if (entryModules.length === 0) {
throw new Error('You must supply options.input to rollup');
}
for (const module of this.moduleById.values()) {
if (module instanceof Module) {
this.modules.push(module);
} else {
this.externalModules.push(module);
}
}
timeEnd('parse modules', 2);
this.phase = BuildPhase.ANALYSE;
// Phase 2 - linking. We populate the module dependency links and
// determine the topological execution order for the bundle
timeStart('analyse dependency graph', 2);
this.link(entryModules);
timeEnd('analyse dependency graph', 2);
// Phase 3 – marking. We include all statements that should be included
timeStart('mark included statements', 2);
for (const module of entryModules) {
module.includeAllExports();
}
this.includeMarked(this.modules);
// check for unused external imports
for (const externalModule of this.externalModules) externalModule.warnUnusedImports();
timeEnd('mark included statements', 2);
// Phase 4 – we construct the chunks, working out the optimal chunking using
// entry point graph colouring, before generating the import and export facades
timeStart('generate chunks', 2);
// TODO: there is one special edge case unhandled here and that is that any module
// exposed as an unresolvable export * (to a graph external export *,
// either as a namespace import reexported or top-level export *)
// should be made to be its own entry point module before chunking
const chunks: Chunk[] = [];
if (this.preserveModules) {
for (const module of this.modules) {
if (
module.isIncluded() ||
module.isEntryPoint ||
module.dynamicallyImportedBy.length > 0
) {
const chunk = new Chunk(this, [module]);
chunk.entryModules = [module];
chunks.push(chunk);
}
}
} else {
for (const chunkModules of inlineDynamicImports
? [this.modules]
: getChunkAssignments(entryModules, manualChunkModulesByAlias)) {
sortByExecutionOrder(chunkModules);
chunks.push(new Chunk(this, chunkModules));
}
}
for (const chunk of chunks) {
chunk.link();
}
const facades: Chunk[] = [];
for (const chunk of chunks) {
facades.push(...chunk.generateFacades());
}
timeEnd('generate chunks', 2);
this.phase = BuildPhase.GENERATE;
return [...chunks, ...facades];
});
}
getCache(): RollupCache {
// handle plugin cache eviction
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
let allDeleted = true;
for (const key of Object.keys(cache)) {
if (cache[key][0] >= this.cacheExpiry) delete cache[key];
else allDeleted = false;
}
if (allDeleted) delete this.pluginCache[name];
}
return {
modules: this.modules.map(module => module.toJSON()),
plugins: this.pluginCache
};
}
includeMarked(modules: Module[]) {
if (this.treeshakingOptions) {
let treeshakingPass = 1;
do {
timeStart(`treeshaking pass ${treeshakingPass}`, 3);
this.needsTreeshakingPass = false;
for (const module of modules) {
if (module.isExecuted) module.include();
}
timeEnd(`treeshaking pass ${treeshakingPass++}`, 3);
} while (this.needsTreeshakingPass);
} else {
// Necessary to properly replace namespace imports
for (const module of modules) module.includeAllInBundle();
}
}
warn(warning: RollupWarning) {
warning.toString = () => {
let str = '';
if (warning.plugin) str += `(${warning.plugin} plugin) `;
if (warning.loc)
str += `${relativeId(warning.loc.file!)} (${warning.loc.line}:${warning.loc.column}) `;
str += warning.message;
return str;
};
this.onwarn(warning);
}
warnDeprecation(deprecation: string | RollupWarning, activeDeprecation: boolean): void {
if | vate link(entryModules: Module[]) {
for (const module of this.modules) {
module.linkDependencies();
}
const { orderedModules, cyclePaths } = analyseModuleExecution(entryModules);
for (const cyclePath of cyclePaths) {
this.warn({
code: 'CIRCULAR_DEPENDENCY',
cycle: cyclePath,
importer: cyclePath[0],
message: `Circular dependency: ${cyclePath.join(' -> ')}`
});
}
this.modules = orderedModules;
for (const module of this.modules) {
module.bindReferences();
}
this.warnForMissingExports();
}
private warnForMissingExports() {
for (const module of this.modules) {
for (const importName of Object.keys(module.importDescriptions)) {
const importDescription = module.importDescriptions[importName];
if (
importDescription.name !== '*' &&
!(importDescription.module as Module).getVariableForExportName(importDescription.name)
) {
module.warn(
{
code: 'NON_EXISTENT_EXPORT',
message: `Non-existent export '${
importDescription.name
}' is imported from ${relativeId((importDescription.module as Module).id)}`,
name: importDescription.name,
source: (importDescription.module as Module).id
},
importDescription.start
);
}
}
}
}
}
| (activeDeprecation || this.strictDeprecations) {
const warning = errDeprecation(deprecation);
if (this.strictDeprecations) {
return error(warning);
}
this.warn(warning);
}
}
pri | identifier_body |
Graph.ts | import * as acorn from 'acorn';
import injectClassFields from 'acorn-class-fields';
import injectExportNsFrom from 'acorn-export-ns-from';
import injectImportMeta from 'acorn-import-meta';
import injectStaticClassFeatures from 'acorn-static-class-features';
import GlobalScope from './ast/scopes/GlobalScope';
import { PathTracker } from './ast/utils/PathTracker';
import Chunk from './Chunk';
import ExternalModule from './ExternalModule';
import Module, { defaultAcornOptions } from './Module';
import { ModuleLoader, UnresolvedModule } from './ModuleLoader';
import {
GetManualChunk,
InputOptions,
IsExternal,
ManualChunksOption,
ModuleJSON,
RollupCache,
RollupWarning,
RollupWatcher,
SerializablePluginCache,
TreeshakingOptions,
WarningHandler
} from './rollup/types';
import { BuildPhase } from './utils/buildPhase';
import { getChunkAssignments } from './utils/chunkAssignment';
import { errDeprecation, error } from './utils/error';
import { analyseModuleExecution, sortByExecutionOrder } from './utils/executionOrder';
import { resolve } from './utils/path';
import { PluginDriver } from './utils/PluginDriver';
import relativeId from './utils/relativeId';
import { timeEnd, timeStart } from './utils/timers';
function normalizeEntryModules(
entryModules: string | string[] | Record<string, string>
): UnresolvedModule[] {
if (typeof entryModules === 'string') {
return [{ fileName: null, name: null, id: entryModules, importer: undefined }];
}
if (Array.isArray(entryModules)) {
return entryModules.map(id => ({ fileName: null, name: null, id, importer: undefined }));
}
return Object.keys(entryModules).map(name => ({
fileName: null,
id: entryModules[name],
importer: undefined,
name
}));
}
export default class Graph {
acornOptions: acorn.Options;
acornParser: typeof acorn.Parser;
cachedModules: Map<string, ModuleJSON>;
contextParse: (code: string, acornOptions?: acorn.Options) => acorn.Node;
deoptimizationTracker: PathTracker;
getModuleContext: (id: string) => string;
moduleById = new Map<string, Module | ExternalModule>();
moduleLoader: ModuleLoader;
needsTreeshakingPass = false;
phase: BuildPhase = BuildPhase.LOAD_AND_PARSE;
pluginDriver: PluginDriver;
preserveModules: boolean;
scope: GlobalScope;
shimMissingExports: boolean;
treeshakingOptions?: TreeshakingOptions;
watchFiles: Record<string, true> = Object.create(null);
private cacheExpiry: number;
private context: string;
private externalModules: ExternalModule[] = [];
private modules: Module[] = [];
private onwarn: WarningHandler;
private pluginCache?: Record<string, SerializablePluginCache>;
private strictDeprecations: boolean;
constructor(options: InputOptions, watcher: RollupWatcher | null) {
this.onwarn = options.onwarn as WarningHandler;
this.deoptimizationTracker = new PathTracker();
this.cachedModules = new Map();
if (options.cache) {
if (options.cache.modules)
for (const module of options.cache.modules) this.cachedModules.set(module.id, module);
}
if (options.cache !== false) {
this.pluginCache = (options.cache && options.cache.plugins) || Object.create(null);
// increment access counter
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
for (const key of Object.keys(cache)) cache[key][0]++;
}
}
this.preserveModules = options.preserveModules!;
this.strictDeprecations = options.strictDeprecations!;
this.cacheExpiry = options.experimentalCacheExpiry!;
if (options.treeshake !== false) {
this.treeshakingOptions =
options.treeshake && options.treeshake !== true
? {
annotations: options.treeshake.annotations !== false,
moduleSideEffects: options.treeshake.moduleSideEffects,
propertyReadSideEffects: options.treeshake.propertyReadSideEffects !== false,
pureExternalModules: options.treeshake.pureExternalModules,
tryCatchDeoptimization: options.treeshake.tryCatchDeoptimization !== false,
unknownGlobalSideEffects: options.treeshake.unknownGlobalSideEffects !== false
}
: {
annotations: true,
moduleSideEffects: true,
propertyReadSideEffects: true,
tryCatchDeoptimization: true,
unknownGlobalSideEffects: true
};
if (typeof this.treeshakingOptions.pureExternalModules !== 'undefined') {
this.warnDeprecation(
`The "treeshake.pureExternalModules" option is deprecated. The "treeshake.moduleSideEffects" option should be used instead. "treeshake.pureExternalModules: true" is equivalent to "treeshake.moduleSideEffects: 'no-external'"`,
true
);
}
}
this.contextParse = (code: string, options: acorn.Options = {}) =>
this.acornParser.parse(code, {
...defaultAcornOptions,
...options,
...this.acornOptions
}); | this.pluginDriver = new PluginDriver(this, options.plugins!, this.pluginCache);
if (watcher) {
const handleChange = (id: string) => this.pluginDriver.hookSeqSync('watchChange', [id]);
watcher.on('change', handleChange);
watcher.once('restart', () => {
watcher.removeListener('change', handleChange);
});
}
this.shimMissingExports = options.shimMissingExports as boolean;
this.scope = new GlobalScope();
this.context = String(options.context);
const optionsModuleContext = options.moduleContext;
if (typeof optionsModuleContext === 'function') {
this.getModuleContext = id => optionsModuleContext(id) || this.context;
} else if (typeof optionsModuleContext === 'object') {
const moduleContext = new Map();
for (const key in optionsModuleContext) {
moduleContext.set(resolve(key), optionsModuleContext[key]);
}
this.getModuleContext = id => moduleContext.get(id) || this.context;
} else {
this.getModuleContext = () => this.context;
}
this.acornOptions = options.acorn ? { ...options.acorn } : {};
const acornPluginsToInject: Function[] = [];
acornPluginsToInject.push(
injectImportMeta,
injectExportNsFrom,
injectClassFields,
injectStaticClassFeatures
);
(this.acornOptions as any).allowAwaitOutsideFunction = true;
const acornInjectPlugins = options.acornInjectPlugins;
acornPluginsToInject.push(
...(Array.isArray(acornInjectPlugins)
? acornInjectPlugins
: acornInjectPlugins
? [acornInjectPlugins]
: [])
);
this.acornParser = acorn.Parser.extend(...(acornPluginsToInject as any));
this.moduleLoader = new ModuleLoader(
this,
this.moduleById,
this.pluginDriver,
options.preserveSymlinks === true,
options.external as (string | RegExp)[] | IsExternal,
(typeof options.manualChunks === 'function' && options.manualChunks) as GetManualChunk | null,
(this.treeshakingOptions ? this.treeshakingOptions.moduleSideEffects : null)!,
(this.treeshakingOptions ? this.treeshakingOptions.pureExternalModules : false)!
);
}
build(
entryModules: string | string[] | Record<string, string>,
manualChunks: ManualChunksOption | void,
inlineDynamicImports: boolean
): Promise<Chunk[]> {
// Phase 1 – discovery. We load the entry module and find which
// modules it imports, and import those, until we have all
// of the entry module's dependencies
timeStart('parse modules', 2);
return Promise.all([
this.moduleLoader.addEntryModules(normalizeEntryModules(entryModules), true),
(manualChunks &&
typeof manualChunks === 'object' &&
this.moduleLoader.addManualChunks(manualChunks)) as Promise<void>
]).then(([{ entryModules, manualChunkModulesByAlias }]) => {
if (entryModules.length === 0) {
throw new Error('You must supply options.input to rollup');
}
for (const module of this.moduleById.values()) {
if (module instanceof Module) {
this.modules.push(module);
} else {
this.externalModules.push(module);
}
}
timeEnd('parse modules', 2);
this.phase = BuildPhase.ANALYSE;
// Phase 2 - linking. We populate the module dependency links and
// determine the topological execution order for the bundle
timeStart('analyse dependency graph', 2);
this.link(entryModules);
timeEnd('analyse dependency graph', 2);
// Phase 3 – marking. We include all statements that should be included
timeStart('mark included statements', 2);
for (const module of entryModules) {
module.includeAllExports();
}
this.includeMarked(this.modules);
// check for unused external imports
for (const externalModule of this.externalModules) externalModule.warnUnusedImports();
timeEnd('mark included statements', 2);
// Phase 4 – we construct the chunks, working out the optimal chunking using
// entry point graph colouring, before generating the import and export facades
timeStart('generate chunks', 2);
// TODO: there is one special edge case unhandled here and that is that any module
// exposed as an unresolvable export * (to a graph external export *,
// either as a namespace import reexported or top-level export *)
// should be made to be its own entry point module before chunking
const chunks: Chunk[] = [];
if (this.preserveModules) {
for (const module of this.modules) {
if (
module.isIncluded() ||
module.isEntryPoint ||
module.dynamicallyImportedBy.length > 0
) {
const chunk = new Chunk(this, [module]);
chunk.entryModules = [module];
chunks.push(chunk);
}
}
} else {
for (const chunkModules of inlineDynamicImports
? [this.modules]
: getChunkAssignments(entryModules, manualChunkModulesByAlias)) {
sortByExecutionOrder(chunkModules);
chunks.push(new Chunk(this, chunkModules));
}
}
for (const chunk of chunks) {
chunk.link();
}
const facades: Chunk[] = [];
for (const chunk of chunks) {
facades.push(...chunk.generateFacades());
}
timeEnd('generate chunks', 2);
this.phase = BuildPhase.GENERATE;
return [...chunks, ...facades];
});
}
getCache(): RollupCache {
// handle plugin cache eviction
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
let allDeleted = true;
for (const key of Object.keys(cache)) {
if (cache[key][0] >= this.cacheExpiry) delete cache[key];
else allDeleted = false;
}
if (allDeleted) delete this.pluginCache[name];
}
return {
modules: this.modules.map(module => module.toJSON()),
plugins: this.pluginCache
};
}
includeMarked(modules: Module[]) {
if (this.treeshakingOptions) {
let treeshakingPass = 1;
do {
timeStart(`treeshaking pass ${treeshakingPass}`, 3);
this.needsTreeshakingPass = false;
for (const module of modules) {
if (module.isExecuted) module.include();
}
timeEnd(`treeshaking pass ${treeshakingPass++}`, 3);
} while (this.needsTreeshakingPass);
} else {
// Necessary to properly replace namespace imports
for (const module of modules) module.includeAllInBundle();
}
}
warn(warning: RollupWarning) {
warning.toString = () => {
let str = '';
if (warning.plugin) str += `(${warning.plugin} plugin) `;
if (warning.loc)
str += `${relativeId(warning.loc.file!)} (${warning.loc.line}:${warning.loc.column}) `;
str += warning.message;
return str;
};
this.onwarn(warning);
}
warnDeprecation(deprecation: string | RollupWarning, activeDeprecation: boolean): void {
if (activeDeprecation || this.strictDeprecations) {
const warning = errDeprecation(deprecation);
if (this.strictDeprecations) {
return error(warning);
}
this.warn(warning);
}
}
private link(entryModules: Module[]) {
for (const module of this.modules) {
module.linkDependencies();
}
const { orderedModules, cyclePaths } = analyseModuleExecution(entryModules);
for (const cyclePath of cyclePaths) {
this.warn({
code: 'CIRCULAR_DEPENDENCY',
cycle: cyclePath,
importer: cyclePath[0],
message: `Circular dependency: ${cyclePath.join(' -> ')}`
});
}
this.modules = orderedModules;
for (const module of this.modules) {
module.bindReferences();
}
this.warnForMissingExports();
}
private warnForMissingExports() {
for (const module of this.modules) {
for (const importName of Object.keys(module.importDescriptions)) {
const importDescription = module.importDescriptions[importName];
if (
importDescription.name !== '*' &&
!(importDescription.module as Module).getVariableForExportName(importDescription.name)
) {
module.warn(
{
code: 'NON_EXISTENT_EXPORT',
message: `Non-existent export '${
importDescription.name
}' is imported from ${relativeId((importDescription.module as Module).id)}`,
name: importDescription.name,
source: (importDescription.module as Module).id
},
importDescription.start
);
}
}
}
}
} | random_line_split | |
Graph.ts | import * as acorn from 'acorn';
import injectClassFields from 'acorn-class-fields';
import injectExportNsFrom from 'acorn-export-ns-from';
import injectImportMeta from 'acorn-import-meta';
import injectStaticClassFeatures from 'acorn-static-class-features';
import GlobalScope from './ast/scopes/GlobalScope';
import { PathTracker } from './ast/utils/PathTracker';
import Chunk from './Chunk';
import ExternalModule from './ExternalModule';
import Module, { defaultAcornOptions } from './Module';
import { ModuleLoader, UnresolvedModule } from './ModuleLoader';
import {
GetManualChunk,
InputOptions,
IsExternal,
ManualChunksOption,
ModuleJSON,
RollupCache,
RollupWarning,
RollupWatcher,
SerializablePluginCache,
TreeshakingOptions,
WarningHandler
} from './rollup/types';
import { BuildPhase } from './utils/buildPhase';
import { getChunkAssignments } from './utils/chunkAssignment';
import { errDeprecation, error } from './utils/error';
import { analyseModuleExecution, sortByExecutionOrder } from './utils/executionOrder';
import { resolve } from './utils/path';
import { PluginDriver } from './utils/PluginDriver';
import relativeId from './utils/relativeId';
import { timeEnd, timeStart } from './utils/timers';
function normalizeEntryModules(
entryModules: string | string[] | Record<string, string>
): UnresolvedModule[] {
if (typeof entryModules === 'string') {
return [{ fileName: null, name: null, id: entryModules, importer: undefined }];
}
if (Array.isArray(entryModules)) |
return Object.keys(entryModules).map(name => ({
fileName: null,
id: entryModules[name],
importer: undefined,
name
}));
}
export default class Graph {
acornOptions: acorn.Options;
acornParser: typeof acorn.Parser;
cachedModules: Map<string, ModuleJSON>;
contextParse: (code: string, acornOptions?: acorn.Options) => acorn.Node;
deoptimizationTracker: PathTracker;
getModuleContext: (id: string) => string;
moduleById = new Map<string, Module | ExternalModule>();
moduleLoader: ModuleLoader;
needsTreeshakingPass = false;
phase: BuildPhase = BuildPhase.LOAD_AND_PARSE;
pluginDriver: PluginDriver;
preserveModules: boolean;
scope: GlobalScope;
shimMissingExports: boolean;
treeshakingOptions?: TreeshakingOptions;
watchFiles: Record<string, true> = Object.create(null);
private cacheExpiry: number;
private context: string;
private externalModules: ExternalModule[] = [];
private modules: Module[] = [];
private onwarn: WarningHandler;
private pluginCache?: Record<string, SerializablePluginCache>;
private strictDeprecations: boolean;
constructor(options: InputOptions, watcher: RollupWatcher | null) {
this.onwarn = options.onwarn as WarningHandler;
this.deoptimizationTracker = new PathTracker();
this.cachedModules = new Map();
if (options.cache) {
if (options.cache.modules)
for (const module of options.cache.modules) this.cachedModules.set(module.id, module);
}
if (options.cache !== false) {
this.pluginCache = (options.cache && options.cache.plugins) || Object.create(null);
// increment access counter
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
for (const key of Object.keys(cache)) cache[key][0]++;
}
}
this.preserveModules = options.preserveModules!;
this.strictDeprecations = options.strictDeprecations!;
this.cacheExpiry = options.experimentalCacheExpiry!;
if (options.treeshake !== false) {
this.treeshakingOptions =
options.treeshake && options.treeshake !== true
? {
annotations: options.treeshake.annotations !== false,
moduleSideEffects: options.treeshake.moduleSideEffects,
propertyReadSideEffects: options.treeshake.propertyReadSideEffects !== false,
pureExternalModules: options.treeshake.pureExternalModules,
tryCatchDeoptimization: options.treeshake.tryCatchDeoptimization !== false,
unknownGlobalSideEffects: options.treeshake.unknownGlobalSideEffects !== false
}
: {
annotations: true,
moduleSideEffects: true,
propertyReadSideEffects: true,
tryCatchDeoptimization: true,
unknownGlobalSideEffects: true
};
if (typeof this.treeshakingOptions.pureExternalModules !== 'undefined') {
this.warnDeprecation(
`The "treeshake.pureExternalModules" option is deprecated. The "treeshake.moduleSideEffects" option should be used instead. "treeshake.pureExternalModules: true" is equivalent to "treeshake.moduleSideEffects: 'no-external'"`,
true
);
}
}
this.contextParse = (code: string, options: acorn.Options = {}) =>
this.acornParser.parse(code, {
...defaultAcornOptions,
...options,
...this.acornOptions
});
this.pluginDriver = new PluginDriver(this, options.plugins!, this.pluginCache);
if (watcher) {
const handleChange = (id: string) => this.pluginDriver.hookSeqSync('watchChange', [id]);
watcher.on('change', handleChange);
watcher.once('restart', () => {
watcher.removeListener('change', handleChange);
});
}
this.shimMissingExports = options.shimMissingExports as boolean;
this.scope = new GlobalScope();
this.context = String(options.context);
const optionsModuleContext = options.moduleContext;
if (typeof optionsModuleContext === 'function') {
this.getModuleContext = id => optionsModuleContext(id) || this.context;
} else if (typeof optionsModuleContext === 'object') {
const moduleContext = new Map();
for (const key in optionsModuleContext) {
moduleContext.set(resolve(key), optionsModuleContext[key]);
}
this.getModuleContext = id => moduleContext.get(id) || this.context;
} else {
this.getModuleContext = () => this.context;
}
this.acornOptions = options.acorn ? { ...options.acorn } : {};
const acornPluginsToInject: Function[] = [];
acornPluginsToInject.push(
injectImportMeta,
injectExportNsFrom,
injectClassFields,
injectStaticClassFeatures
);
(this.acornOptions as any).allowAwaitOutsideFunction = true;
const acornInjectPlugins = options.acornInjectPlugins;
acornPluginsToInject.push(
...(Array.isArray(acornInjectPlugins)
? acornInjectPlugins
: acornInjectPlugins
? [acornInjectPlugins]
: [])
);
this.acornParser = acorn.Parser.extend(...(acornPluginsToInject as any));
this.moduleLoader = new ModuleLoader(
this,
this.moduleById,
this.pluginDriver,
options.preserveSymlinks === true,
options.external as (string | RegExp)[] | IsExternal,
(typeof options.manualChunks === 'function' && options.manualChunks) as GetManualChunk | null,
(this.treeshakingOptions ? this.treeshakingOptions.moduleSideEffects : null)!,
(this.treeshakingOptions ? this.treeshakingOptions.pureExternalModules : false)!
);
}
build(
entryModules: string | string[] | Record<string, string>,
manualChunks: ManualChunksOption | void,
inlineDynamicImports: boolean
): Promise<Chunk[]> {
// Phase 1 – discovery. We load the entry module and find which
// modules it imports, and import those, until we have all
// of the entry module's dependencies
timeStart('parse modules', 2);
return Promise.all([
this.moduleLoader.addEntryModules(normalizeEntryModules(entryModules), true),
(manualChunks &&
typeof manualChunks === 'object' &&
this.moduleLoader.addManualChunks(manualChunks)) as Promise<void>
]).then(([{ entryModules, manualChunkModulesByAlias }]) => {
if (entryModules.length === 0) {
throw new Error('You must supply options.input to rollup');
}
for (const module of this.moduleById.values()) {
if (module instanceof Module) {
this.modules.push(module);
} else {
this.externalModules.push(module);
}
}
timeEnd('parse modules', 2);
this.phase = BuildPhase.ANALYSE;
// Phase 2 - linking. We populate the module dependency links and
// determine the topological execution order for the bundle
timeStart('analyse dependency graph', 2);
this.link(entryModules);
timeEnd('analyse dependency graph', 2);
// Phase 3 – marking. We include all statements that should be included
timeStart('mark included statements', 2);
for (const module of entryModules) {
module.includeAllExports();
}
this.includeMarked(this.modules);
// check for unused external imports
for (const externalModule of this.externalModules) externalModule.warnUnusedImports();
timeEnd('mark included statements', 2);
// Phase 4 – we construct the chunks, working out the optimal chunking using
// entry point graph colouring, before generating the import and export facades
timeStart('generate chunks', 2);
// TODO: there is one special edge case unhandled here and that is that any module
// exposed as an unresolvable export * (to a graph external export *,
// either as a namespace import reexported or top-level export *)
// should be made to be its own entry point module before chunking
const chunks: Chunk[] = [];
if (this.preserveModules) {
for (const module of this.modules) {
if (
module.isIncluded() ||
module.isEntryPoint ||
module.dynamicallyImportedBy.length > 0
) {
const chunk = new Chunk(this, [module]);
chunk.entryModules = [module];
chunks.push(chunk);
}
}
} else {
for (const chunkModules of inlineDynamicImports
? [this.modules]
: getChunkAssignments(entryModules, manualChunkModulesByAlias)) {
sortByExecutionOrder(chunkModules);
chunks.push(new Chunk(this, chunkModules));
}
}
for (const chunk of chunks) {
chunk.link();
}
const facades: Chunk[] = [];
for (const chunk of chunks) {
facades.push(...chunk.generateFacades());
}
timeEnd('generate chunks', 2);
this.phase = BuildPhase.GENERATE;
return [...chunks, ...facades];
});
}
getCache(): RollupCache {
// handle plugin cache eviction
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
let allDeleted = true;
for (const key of Object.keys(cache)) {
if (cache[key][0] >= this.cacheExpiry) delete cache[key];
else allDeleted = false;
}
if (allDeleted) delete this.pluginCache[name];
}
return {
modules: this.modules.map(module => module.toJSON()),
plugins: this.pluginCache
};
}
includeMarked(modules: Module[]) {
if (this.treeshakingOptions) {
let treeshakingPass = 1;
do {
timeStart(`treeshaking pass ${treeshakingPass}`, 3);
this.needsTreeshakingPass = false;
for (const module of modules) {
if (module.isExecuted) module.include();
}
timeEnd(`treeshaking pass ${treeshakingPass++}`, 3);
} while (this.needsTreeshakingPass);
} else {
// Necessary to properly replace namespace imports
for (const module of modules) module.includeAllInBundle();
}
}
warn(warning: RollupWarning) {
warning.toString = () => {
let str = '';
if (warning.plugin) str += `(${warning.plugin} plugin) `;
if (warning.loc)
str += `${relativeId(warning.loc.file!)} (${warning.loc.line}:${warning.loc.column}) `;
str += warning.message;
return str;
};
this.onwarn(warning);
}
warnDeprecation(deprecation: string | RollupWarning, activeDeprecation: boolean): void {
if (activeDeprecation || this.strictDeprecations) {
const warning = errDeprecation(deprecation);
if (this.strictDeprecations) {
return error(warning);
}
this.warn(warning);
}
}
private link(entryModules: Module[]) {
for (const module of this.modules) {
module.linkDependencies();
}
const { orderedModules, cyclePaths } = analyseModuleExecution(entryModules);
for (const cyclePath of cyclePaths) {
this.warn({
code: 'CIRCULAR_DEPENDENCY',
cycle: cyclePath,
importer: cyclePath[0],
message: `Circular dependency: ${cyclePath.join(' -> ')}`
});
}
this.modules = orderedModules;
for (const module of this.modules) {
module.bindReferences();
}
this.warnForMissingExports();
}
private warnForMissingExports() {
for (const module of this.modules) {
for (const importName of Object.keys(module.importDescriptions)) {
const importDescription = module.importDescriptions[importName];
if (
importDescription.name !== '*' &&
!(importDescription.module as Module).getVariableForExportName(importDescription.name)
) {
module.warn(
{
code: 'NON_EXISTENT_EXPORT',
message: `Non-existent export '${
importDescription.name
}' is imported from ${relativeId((importDescription.module as Module).id)}`,
name: importDescription.name,
source: (importDescription.module as Module).id
},
importDescription.start
);
}
}
}
}
}
| {
return entryModules.map(id => ({ fileName: null, name: null, id, importer: undefined }));
} | conditional_block |
Graph.ts | import * as acorn from 'acorn';
import injectClassFields from 'acorn-class-fields';
import injectExportNsFrom from 'acorn-export-ns-from';
import injectImportMeta from 'acorn-import-meta';
import injectStaticClassFeatures from 'acorn-static-class-features';
import GlobalScope from './ast/scopes/GlobalScope';
import { PathTracker } from './ast/utils/PathTracker';
import Chunk from './Chunk';
import ExternalModule from './ExternalModule';
import Module, { defaultAcornOptions } from './Module';
import { ModuleLoader, UnresolvedModule } from './ModuleLoader';
import {
GetManualChunk,
InputOptions,
IsExternal,
ManualChunksOption,
ModuleJSON,
RollupCache,
RollupWarning,
RollupWatcher,
SerializablePluginCache,
TreeshakingOptions,
WarningHandler
} from './rollup/types';
import { BuildPhase } from './utils/buildPhase';
import { getChunkAssignments } from './utils/chunkAssignment';
import { errDeprecation, error } from './utils/error';
import { analyseModuleExecution, sortByExecutionOrder } from './utils/executionOrder';
import { resolve } from './utils/path';
import { PluginDriver } from './utils/PluginDriver';
import relativeId from './utils/relativeId';
import { timeEnd, timeStart } from './utils/timers';
function normalizeEntryModules(
entryModules: string | string[] | Record<string, string>
): UnresolvedModule[] {
if (typeof entryModules === 'string') {
return [{ fileName: null, name: null, id: entryModules, importer: undefined }];
}
if (Array.isArray(entryModules)) {
return entryModules.map(id => ({ fileName: null, name: null, id, importer: undefined }));
}
return Object.keys(entryModules).map(name => ({
fileName: null,
id: entryModules[name],
importer: undefined,
name
}));
}
export default class Graph {
acornOptions: acorn.Options;
acornParser: typeof acorn.Parser;
cachedModules: Map<string, ModuleJSON>;
contextParse: (code: string, acornOptions?: acorn.Options) => acorn.Node;
deoptimizationTracker: PathTracker;
getModuleContext: (id: string) => string;
moduleById = new Map<string, Module | ExternalModule>();
moduleLoader: ModuleLoader;
needsTreeshakingPass = false;
phase: BuildPhase = BuildPhase.LOAD_AND_PARSE;
pluginDriver: PluginDriver;
preserveModules: boolean;
scope: GlobalScope;
shimMissingExports: boolean;
treeshakingOptions?: TreeshakingOptions;
watchFiles: Record<string, true> = Object.create(null);
private cacheExpiry: number;
private context: string;
private externalModules: ExternalModule[] = [];
private modules: Module[] = [];
private onwarn: WarningHandler;
private pluginCache?: Record<string, SerializablePluginCache>;
private strictDeprecations: boolean;
constructor(options: InputOptions, watcher: RollupWatcher | null) {
this.onwarn = options.onwarn as WarningHandler;
this.deoptimizationTracker = new PathTracker();
this.cachedModules = new Map();
if (options.cache) {
if (options.cache.modules)
for (const module of options.cache.modules) this.cachedModules.set(module.id, module);
}
if (options.cache !== false) {
this.pluginCache = (options.cache && options.cache.plugins) || Object.create(null);
// increment access counter
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
for (const key of Object.keys(cache)) cache[key][0]++;
}
}
this.preserveModules = options.preserveModules!;
this.strictDeprecations = options.strictDeprecations!;
this.cacheExpiry = options.experimentalCacheExpiry!;
if (options.treeshake !== false) {
this.treeshakingOptions =
options.treeshake && options.treeshake !== true
? {
annotations: options.treeshake.annotations !== false,
moduleSideEffects: options.treeshake.moduleSideEffects,
propertyReadSideEffects: options.treeshake.propertyReadSideEffects !== false,
pureExternalModules: options.treeshake.pureExternalModules,
tryCatchDeoptimization: options.treeshake.tryCatchDeoptimization !== false,
unknownGlobalSideEffects: options.treeshake.unknownGlobalSideEffects !== false
}
: {
annotations: true,
moduleSideEffects: true,
propertyReadSideEffects: true,
tryCatchDeoptimization: true,
unknownGlobalSideEffects: true
};
if (typeof this.treeshakingOptions.pureExternalModules !== 'undefined') {
this.warnDeprecation(
`The "treeshake.pureExternalModules" option is deprecated. The "treeshake.moduleSideEffects" option should be used instead. "treeshake.pureExternalModules: true" is equivalent to "treeshake.moduleSideEffects: 'no-external'"`,
true
);
}
}
this.contextParse = (code: string, options: acorn.Options = {}) =>
this.acornParser.parse(code, {
...defaultAcornOptions,
...options,
...this.acornOptions
});
this.pluginDriver = new PluginDriver(this, options.plugins!, this.pluginCache);
if (watcher) {
const handleChange = (id: string) => this.pluginDriver.hookSeqSync('watchChange', [id]);
watcher.on('change', handleChange);
watcher.once('restart', () => {
watcher.removeListener('change', handleChange);
});
}
this.shimMissingExports = options.shimMissingExports as boolean;
this.scope = new GlobalScope();
this.context = String(options.context);
const optionsModuleContext = options.moduleContext;
if (typeof optionsModuleContext === 'function') {
this.getModuleContext = id => optionsModuleContext(id) || this.context;
} else if (typeof optionsModuleContext === 'object') {
const moduleContext = new Map();
for (const key in optionsModuleContext) {
moduleContext.set(resolve(key), optionsModuleContext[key]);
}
this.getModuleContext = id => moduleContext.get(id) || this.context;
} else {
this.getModuleContext = () => this.context;
}
this.acornOptions = options.acorn ? { ...options.acorn } : {};
const acornPluginsToInject: Function[] = [];
acornPluginsToInject.push(
injectImportMeta,
injectExportNsFrom,
injectClassFields,
injectStaticClassFeatures
);
(this.acornOptions as any).allowAwaitOutsideFunction = true;
const acornInjectPlugins = options.acornInjectPlugins;
acornPluginsToInject.push(
...(Array.isArray(acornInjectPlugins)
? acornInjectPlugins
: acornInjectPlugins
? [acornInjectPlugins]
: [])
);
this.acornParser = acorn.Parser.extend(...(acornPluginsToInject as any));
this.moduleLoader = new ModuleLoader(
this,
this.moduleById,
this.pluginDriver,
options.preserveSymlinks === true,
options.external as (string | RegExp)[] | IsExternal,
(typeof options.manualChunks === 'function' && options.manualChunks) as GetManualChunk | null,
(this.treeshakingOptions ? this.treeshakingOptions.moduleSideEffects : null)!,
(this.treeshakingOptions ? this.treeshakingOptions.pureExternalModules : false)!
);
}
| (
entryModules: string | string[] | Record<string, string>,
manualChunks: ManualChunksOption | void,
inlineDynamicImports: boolean
): Promise<Chunk[]> {
// Phase 1 – discovery. We load the entry module and find which
// modules it imports, and import those, until we have all
// of the entry module's dependencies
timeStart('parse modules', 2);
return Promise.all([
this.moduleLoader.addEntryModules(normalizeEntryModules(entryModules), true),
(manualChunks &&
typeof manualChunks === 'object' &&
this.moduleLoader.addManualChunks(manualChunks)) as Promise<void>
]).then(([{ entryModules, manualChunkModulesByAlias }]) => {
if (entryModules.length === 0) {
throw new Error('You must supply options.input to rollup');
}
for (const module of this.moduleById.values()) {
if (module instanceof Module) {
this.modules.push(module);
} else {
this.externalModules.push(module);
}
}
timeEnd('parse modules', 2);
this.phase = BuildPhase.ANALYSE;
// Phase 2 - linking. We populate the module dependency links and
// determine the topological execution order for the bundle
timeStart('analyse dependency graph', 2);
this.link(entryModules);
timeEnd('analyse dependency graph', 2);
// Phase 3 – marking. We include all statements that should be included
timeStart('mark included statements', 2);
for (const module of entryModules) {
module.includeAllExports();
}
this.includeMarked(this.modules);
// check for unused external imports
for (const externalModule of this.externalModules) externalModule.warnUnusedImports();
timeEnd('mark included statements', 2);
// Phase 4 – we construct the chunks, working out the optimal chunking using
// entry point graph colouring, before generating the import and export facades
timeStart('generate chunks', 2);
// TODO: there is one special edge case unhandled here and that is that any module
// exposed as an unresolvable export * (to a graph external export *,
// either as a namespace import reexported or top-level export *)
// should be made to be its own entry point module before chunking
const chunks: Chunk[] = [];
if (this.preserveModules) {
for (const module of this.modules) {
if (
module.isIncluded() ||
module.isEntryPoint ||
module.dynamicallyImportedBy.length > 0
) {
const chunk = new Chunk(this, [module]);
chunk.entryModules = [module];
chunks.push(chunk);
}
}
} else {
for (const chunkModules of inlineDynamicImports
? [this.modules]
: getChunkAssignments(entryModules, manualChunkModulesByAlias)) {
sortByExecutionOrder(chunkModules);
chunks.push(new Chunk(this, chunkModules));
}
}
for (const chunk of chunks) {
chunk.link();
}
const facades: Chunk[] = [];
for (const chunk of chunks) {
facades.push(...chunk.generateFacades());
}
timeEnd('generate chunks', 2);
this.phase = BuildPhase.GENERATE;
return [...chunks, ...facades];
});
}
getCache(): RollupCache {
// handle plugin cache eviction
for (const name in this.pluginCache) {
const cache = this.pluginCache[name];
let allDeleted = true;
for (const key of Object.keys(cache)) {
if (cache[key][0] >= this.cacheExpiry) delete cache[key];
else allDeleted = false;
}
if (allDeleted) delete this.pluginCache[name];
}
return {
modules: this.modules.map(module => module.toJSON()),
plugins: this.pluginCache
};
}
includeMarked(modules: Module[]) {
if (this.treeshakingOptions) {
let treeshakingPass = 1;
do {
timeStart(`treeshaking pass ${treeshakingPass}`, 3);
this.needsTreeshakingPass = false;
for (const module of modules) {
if (module.isExecuted) module.include();
}
timeEnd(`treeshaking pass ${treeshakingPass++}`, 3);
} while (this.needsTreeshakingPass);
} else {
// Necessary to properly replace namespace imports
for (const module of modules) module.includeAllInBundle();
}
}
warn(warning: RollupWarning) {
warning.toString = () => {
let str = '';
if (warning.plugin) str += `(${warning.plugin} plugin) `;
if (warning.loc)
str += `${relativeId(warning.loc.file!)} (${warning.loc.line}:${warning.loc.column}) `;
str += warning.message;
return str;
};
this.onwarn(warning);
}
warnDeprecation(deprecation: string | RollupWarning, activeDeprecation: boolean): void {
if (activeDeprecation || this.strictDeprecations) {
const warning = errDeprecation(deprecation);
if (this.strictDeprecations) {
return error(warning);
}
this.warn(warning);
}
}
private link(entryModules: Module[]) {
for (const module of this.modules) {
module.linkDependencies();
}
const { orderedModules, cyclePaths } = analyseModuleExecution(entryModules);
for (const cyclePath of cyclePaths) {
this.warn({
code: 'CIRCULAR_DEPENDENCY',
cycle: cyclePath,
importer: cyclePath[0],
message: `Circular dependency: ${cyclePath.join(' -> ')}`
});
}
this.modules = orderedModules;
for (const module of this.modules) {
module.bindReferences();
}
this.warnForMissingExports();
}
private warnForMissingExports() {
for (const module of this.modules) {
for (const importName of Object.keys(module.importDescriptions)) {
const importDescription = module.importDescriptions[importName];
if (
importDescription.name !== '*' &&
!(importDescription.module as Module).getVariableForExportName(importDescription.name)
) {
module.warn(
{
code: 'NON_EXISTENT_EXPORT',
message: `Non-existent export '${
importDescription.name
}' is imported from ${relativeId((importDescription.module as Module).id)}`,
name: importDescription.name,
source: (importDescription.module as Module).id
},
importDescription.start
);
}
}
}
}
}
| build | identifier_name |
reg_adv_train_loop.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import models.drn as drn
from models.DRNSeg import DRNSeg
from models.FCN32s import FCN32s
import data_transforms as transforms
import json
import math
import os
from os.path import exists, join, split
import threading
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from learning.utils_learn import *
from learning.dataloader import SegList, SegListMS, get_info, get_loader
import logging
from learning.validate import validate
import data_transforms as transforms
from dataloaders.utils import decode_segmap
from torch.utils.tensorboard import SummaryWriter
from learning.attack import PGD_attack
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
log_epsilon = 1e-20
epsilon = 1e-10
def ensemble_entropy(y_pred):
num_pred = y_pred.size(2) * y_pred.size(3)
entropy_type = "sum_entropy"
if entropy_type == "all_entropy":
flag_pred = y_pred.view(y_pred.size(0), -1)
entropy = torch.sum(-flag_pred * torch.log(flag_pred + log_epsilon)) #TODO: here, even sum the batch dim
elif entropy_type == "sum_entropy": # Pang et al
sum_score = torch.sum(torch.sum(y_pred, dim=3), dim=2) / num_pred
entropy = torch.sum(-sum_score * torch.log(sum_score + log_epsilon))
# print("\n_____Debugging entropy", "y_pred.shape", y_pred.shape,
# # "sum", torch.sum(y_pred, axis=3).shape,
# # "sum of sum", torch.sum(torch.sum(y_pred, axis=3), axis=2).shape,
# "num_pred", num_pred,
# "sum_score", sum_score,
# "entropy", entropy, torch.log(sum_score + log_epsilon),"\n",
# # "individual elements", np.where(y_pred.detach().numpy() < 0)[0].shape,
# "\n\n")
elif entropy_type == "mutual_info": # borrow the mutual information idea, where it is the total entropy - mean of individual entropy
pass
return entropy
# def log_det(y_true, y_pred, num_model=FLAGS.num_models):
# bool_R_y_true = tf.not_equal(tf.ones_like(y_true) - y_true, zero) # batch_size X (num_class X num_models), 2-D
# mask_non_y_pred = tf.boolean_mask(y_pred, bool_R_y_true) # batch_size X (num_class-1) X num_models, 1-D
# mask_non_y_pred = tf.reshape(mask_non_y_pred, [-1, num_model, num_classes-1]) # batch_size X num_model X (num_class-1), 3-D
# mask_non_y_pred = mask_non_y_pred / tf.norm(mask_non_y_pred, axis=2, keepdims=True) # batch_size X num_model X (num_class-1), 3-D
# matrix = tf.matmul(mask_non_y_pred, tf.transpose(mask_non_y_pred, perm=[0, 2, 1])) # batch_size X num_model X num_model, 3-D
# all_log_det = tf.linalg.logdet(matrix+det_offset*tf.expand_dims(tf.eye(num_model),0)) # batch_size X 1, 1-D
# return all_log_det
#TODO: also a function with global logdet:
def log_det_global_cuda():
pass
#TODO: this is a local logdet loss
def log_det_cuda(y_pred, y_class_true, args, neglect = 255): # We need to max Diversity for each class
# TODO: we should first down sampling before move on (like dropout 99%)
delta_det = 1e-3
drop_ratio = 1 - args.drop_ratio
# y_true need to be one hot
# print('class num', y_pred.size(1))
# mark_neglect = torch.ones_like(y_class_true) * neglect == y_class_true
if torch.cuda.is_available():
mark_neglect = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * neglect == y_class_true
else:
mark_neglect = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(
1) * neglect == y_class_true
# print("mask", torch.max(mark_neglect))
y_class_true = y_class_true * (1 - mark_neglect.long())
# print('max 18? Yes, it is , starting from 0 then 18', y_class_true.max())
y_class_true = y_class_true * (1 - mark_neglect.long()) + (y_pred.size(1)) * mark_neglect.long() # we put 18 + 1 as the neglect class
# print('lab', torch.max(y_class_true))
# print(y_pred.size(1)+1)
if torch.cuda.is_available():
y_class_true = y_class_true.cuda()
y_true = torch.nn.functional.one_hot(y_class_true, y_pred.size(1)+1)
# print('one hot size', y_true.size())
y_true = y_true[:,:,:,:y_pred.size(1)]
# print('one hot size', y_true.size(), 'pred size', y_pred.size())
if torch.cuda.is_available():
non_max_mask = (torch.ones_like(y_true).cuda() - y_true) != torch.zeros_like(y_true).cuda()
else:
non_max_mask = (torch.ones_like(y_true) - y_true) != torch.zeros_like(y_true)
non_max_mask = torch.transpose(non_max_mask, dim0=1, dim1=3).float()
# print('non_max_mask', non_max_mask.size())
# print("HERE", non_max_mask.shape, y_pred.shape)
mask_non_y_pred = non_max_mask * y_pred
mask_non_y_pred = mask_non_y_pred.view(-1, mask_non_y_pred.size(1), mask_non_y_pred.size(2) * mask_non_y_pred.size(3))
mask_non_y_pred = torch.transpose(mask_non_y_pred, dim0=1, dim1=2) # batch * num_pixel * class_num
#TODO: now , we look at the diversity within the groundtruth class
# class_categories = set(y_class_true.cpu().numpy())
det_loss = 0
together = False
if together:
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
drop_approximate = drop_approximate.repeat(1, 1, mask_non_y_pred.size(2))
select_element = mask_non_y_pred[drop_approximate]
element_same_class = select_element.view(-1, y_pred.size(1))
element_same_class = element_same_class / ((torch.sum(element_same_class ** 2, dim=1).unsqueeze(
1)+ epsilon) ** 0.5) # pixel_num * fea_len TODO: need epsilon or result in NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): # TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
## print("logdet this", logdet_loss)
det_loss = logdet_loss
else:
for each_category in range(y_pred.size(1)):
# ind_select = torch.ones_like(y_class_true).cuda() * each_category == y_class_true
if torch.cuda.is_available():
ind_select = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
else:
ind_select = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
# print(ind_select.size())
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(ind_select.size()).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(ind_select.size()).uniform_() > drop_ratio
ind_select = ind_select * drop_approximate
# If the class exist in the input
if torch.sum(ind_select) > 0:
# print('each cate', each_category)
flat_ind_select = ind_select.view(-1, ind_select.size(1)*ind_select.size(2)) # batch * num_pixel
flat_ind_select = flat_ind_select.unsqueeze(2) # batch * num_pixel * 1
flat_ind_select = flat_ind_select.repeat(1, 1, mask_non_y_pred.size(2))
batch_wise = True
if batch_wise:
# iterating over each batch # can be replace with a batch global one, will check the running time
for batch_i in range(flat_ind_select.size(0)):
mask_non_y_pred_b = mask_non_y_pred[batch_i]
flat_ind_select_b = flat_ind_select[batch_i]
if torch.sum(flat_ind_select_b)==0:
continue
element_same_class = mask_non_y_pred_b[flat_ind_select_b] # selecting the predict score only for that "category" class;
# we expect the length shrink
# size: batch * num_pixel * feature_length
# TODO: check how to reshape this back !I have checked, this is correct.
element_same_class = element_same_class.view(-1, y_pred.size(1))
# print('after reshape', element_same_class)
# print("\nELEMENT_SAME_CLASS2 ","#blank elem",np.isnan(element_same_class.clone().detach().numpy()).sum(),"#0s ",len(np.where(element_same_class.clone().detach().numpy() ==0.)[0]),np.max(element_same_class.clone().detach().numpy()), np.min(element_same_class.clone().detach().numpy()))
# TODO: Normalize the score feature vector for each pixel
#TODO: it is crucial to add epsilon inside the root operation follows in order to prevent NAN during BP
# So I think norm should be preferred
element_same_class = element_same_class / (((torch.sum(element_same_class ** 2, dim=1).unsqueeze(1))+ epsilon) ** 0.5 + epsilon) # pixel_num * fea_len
# element_same_class = element_same_class / torch.norm(element_same_class, dim=2, keepdim=True)
matrix = torch.mm(element_same_class, torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): #TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
det_loss += logdet_loss
else:
element_same_class = mask_non_y_pred[flat_ind_select] | element_same_class = element_same_class.view(-1, y_pred.size(1))
element_same_class = element_same_class / (torch.norm(element_same_class, dim=1, keepdim=True) + epsilon) #TODO: divided , you need epsilon to prevent NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # pixel_num * pixel_num
#
logdet_loss = torch.logdet(matrix + delta_det * torch.eye(matrix.size(1).cuda()))
det_loss += logdet_loss
return det_loss
#TODO: Can we use GAN to align the variance? Use Pang et al? Maximum the overall entropy?
def train_seg_reg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
# print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained and args.loading:
print('args.pretrained', args.pretrained)
single_model.load_state_dict(torch.load(args.pretrained))
out_dir = 'output/{}_{:03d}_{}'.format(args.arch, 0, args.phase)
model = torch.nn.DataParallel(single_model)
criterion = nn.NLLLoss(ignore_index=255)
if torch.cuda.is_available():
model.cuda()
criterion.cuda()
# Data loading code
info = get_info(args.dataset)
train_loader = get_loader(args, "train")
val_loader = get_loader(args, "val", out_name=True)
adv_val_loader = get_loader(args, "adv_val", out_name=True)
# define loss function (criterion) and pptimizer
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# Backup files before resuming/starting training
backup_output_dir = args.backup_output_dir
os.makedirs(backup_output_dir, exist_ok=True)
if os.path.exists(backup_output_dir):
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
experiment_backup_folder = "reg_adv_train_" + args.arch + "_" + args.dataset + "_" + timestamp
experiment_backup_folder = os.path.join(backup_output_dir, experiment_backup_folder)
print(experiment_backup_folder)
shutil.copytree('.', experiment_backup_folder, ignore=include_patterns('*.py', '*.json'))
# Logging with TensorBoard
log_dir = os.path.join(experiment_backup_folder, "runs")
val_writer = SummaryWriter(log_dir=log_dir + '/validate_runs/')
writer = SummaryWriter(log_dir=log_dir)
fh = logging.FileHandler(experiment_backup_folder + '/log.txt')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# optionally resume from a checkpoint
if args.resume:
print("resuming", args.resume)
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion,args=args,log_dir=experiment_backup_folder, eval_score=accuracy, info=info)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, args.dataset,
eval_score=accuracy)
# evaluate on validation set
prec = validate(val_loader, model, criterion, args=args,log_dir=experiment_backup_folder, eval_score=accuracy,
info=info, writer=val_writer, epoch=epoch)
if epoch % args.val_freq:
from learning.validate import validate_adv
mAP = validate_adv(adv_val_loader, model, args.classes, save_vis=True,
has_gt=True, output_dir=out_dir, downsize_scale=args.downsize_scale,
args=args, info=info, writer=val_writer, epoch=epoch)
logger.info('adv mAP: %f', mAP)
# writer.add_scalar('Reg_Adv_Validate/prec', prec, epoch)
writer.add_scalar('Reg_Adv_Validate/mAP', mAP, epoch)
is_best = prec > best_prec1
if is_best:
best_prec1 = max(mAP, best_prec1)
save_model_path = os.path.join(experiment_backup_folder, 'savecheckpoint')
os.makedirs(save_model_path, exist_ok=True)
checkpoint_path = os.path.join(save_model_path, 'checkpoint_latest.pth.tar')
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path, save_model_path=save_model_path)
if (epoch + 1) % 1 == 0:
# history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
history_path = os.path.join(save_model_path, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
writer.close()
def reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, dataset,
eval_score=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
reg_losses = AverageMeter()
entropy_losses = AverageMeter()
classify_losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
print("Standard Training + Regularization" if not args.adv_train else "Adversarial Training + Regularization")
for i, (input, target) in enumerate(train_loader):
# print('target', target)
# measure data loading time
data_time.update(time.time() - end)
if args.adv_train:
adv_img = PGD_attack(input, target, model, criterion, args.epsilon, args.steps, args.dataset,
args.step_size, info, using_noise=True)
else:
adv_img = input
# input = input.cuda()
# print('diff', (adv_img.data-input) / (args.epsilon))
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
# TODO: adversarial training
clean_input = input
input = adv_img.data
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda()
clean_input = clean_input.cuda()
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target, requires_grad=False)
clean_input_var = torch.autograd.Variable(clean_input)
# compute output
# output = model(input_var)[0]
output, _, softmax_output = model(input_var)
cla_loss = criterion(output, target_var)
# TODO: we are random sampling this, we may want to do several times
reg_loss_total = 0
for iii in range(args.MC_times):
reg_loss = log_det_cuda(softmax_output, target_var, args) #y_true, y_pred, y_class_true
reg_loss_total += reg_loss
reg_term = args.reg_lambda / args.MC_times * reg_loss_total
# entropy_loss = reg_term
# entropy_loss = 0
entropy_loss = args.entropy_lambda * ensemble_entropy(softmax_output)
loss = cla_loss - reg_term - entropy_loss
losses.update(loss.data.item(), input.size(0))
classify_losses.update(cla_loss.data.item(), input.size(0))
reg_losses.update(reg_term.data.item(), input.size(0))
entropy_losses.update(entropy_loss.data.item(), input.size(0))
if eval_score is not None:
if target_var.size(0)>0:
scores.update(eval_score(output, target_var), input.size(0))
else:
print("0 size!")
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (args.debug):
print_freq = 10
if i % (args.print_freq // args.batch_size) == 0:
# Convert target and prediction to rgb images to visualise
class_prediction = torch.argmax(output, dim=1)
decoded_target = decode_segmap(target[0].cpu().numpy() if torch.cuda.is_available() else target[0].numpy(),
dataset)
decoded_target = np.moveaxis(decoded_target, 2, 0)
decoded_class_prediction = decode_segmap(
class_prediction[0].cpu().numpy() if torch.cuda.is_available() else class_prediction[0].numpy(),
dataset)
decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Xent Loss {classify_losses.val:.4f} ({classify_losses.avg:.4f})\t'
'Reg Loss {reg_losses.val:.4f} ({reg_losses.avg:.4f})\t'
'Entropy Loss {entro_losses.val:.4f} ({entro_losses.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, classify_losses=classify_losses, reg_losses=reg_losses,
entro_losses = entropy_losses, loss=losses, top1=scores))
# compute output for the case when clean image is passed.
clean_output = model(clean_input_var)[0]
clean_loss = criterion(clean_output, target_var)
if eval_score is not None:
if target_var.size(0) > 0:
clean_score = eval_score(clean_output, target_var)
else:
print("0 size!")
clean_score = 0
writer.add_image('Image/adv image ', back_transform(input_var, info)[0])
writer.add_image('Image/clean image ', back_transform(clean_input_var, info)[0])
writer.add_image('Image/image target ', decoded_target)
writer.add_image('Image/image prediction ', decoded_class_prediction)
writer.add_scalar('Reg_Adv_Train/Score', scores.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Loss', losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Score', clean_score, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Loss', clean_loss, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Classify_Loss', classify_losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Reg_Loss', reg_losses.val, epoch * len(train_loader) + i)
if args.debug and i==(args.print_freq // args.batch_size)*10: #breaking after 10 images.
break
# break | #TODO: check how to reshape this back !!!!!!!!! | random_line_split |
reg_adv_train_loop.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import models.drn as drn
from models.DRNSeg import DRNSeg
from models.FCN32s import FCN32s
import data_transforms as transforms
import json
import math
import os
from os.path import exists, join, split
import threading
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from learning.utils_learn import *
from learning.dataloader import SegList, SegListMS, get_info, get_loader
import logging
from learning.validate import validate
import data_transforms as transforms
from dataloaders.utils import decode_segmap
from torch.utils.tensorboard import SummaryWriter
from learning.attack import PGD_attack
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
log_epsilon = 1e-20
epsilon = 1e-10
def ensemble_entropy(y_pred):
num_pred = y_pred.size(2) * y_pred.size(3)
entropy_type = "sum_entropy"
if entropy_type == "all_entropy":
flag_pred = y_pred.view(y_pred.size(0), -1)
entropy = torch.sum(-flag_pred * torch.log(flag_pred + log_epsilon)) #TODO: here, even sum the batch dim
elif entropy_type == "sum_entropy": # Pang et al
sum_score = torch.sum(torch.sum(y_pred, dim=3), dim=2) / num_pred
entropy = torch.sum(-sum_score * torch.log(sum_score + log_epsilon))
# print("\n_____Debugging entropy", "y_pred.shape", y_pred.shape,
# # "sum", torch.sum(y_pred, axis=3).shape,
# # "sum of sum", torch.sum(torch.sum(y_pred, axis=3), axis=2).shape,
# "num_pred", num_pred,
# "sum_score", sum_score,
# "entropy", entropy, torch.log(sum_score + log_epsilon),"\n",
# # "individual elements", np.where(y_pred.detach().numpy() < 0)[0].shape,
# "\n\n")
elif entropy_type == "mutual_info": # borrow the mutual information idea, where it is the total entropy - mean of individual entropy
pass
return entropy
# def log_det(y_true, y_pred, num_model=FLAGS.num_models):
# bool_R_y_true = tf.not_equal(tf.ones_like(y_true) - y_true, zero) # batch_size X (num_class X num_models), 2-D
# mask_non_y_pred = tf.boolean_mask(y_pred, bool_R_y_true) # batch_size X (num_class-1) X num_models, 1-D
# mask_non_y_pred = tf.reshape(mask_non_y_pred, [-1, num_model, num_classes-1]) # batch_size X num_model X (num_class-1), 3-D
# mask_non_y_pred = mask_non_y_pred / tf.norm(mask_non_y_pred, axis=2, keepdims=True) # batch_size X num_model X (num_class-1), 3-D
# matrix = tf.matmul(mask_non_y_pred, tf.transpose(mask_non_y_pred, perm=[0, 2, 1])) # batch_size X num_model X num_model, 3-D
# all_log_det = tf.linalg.logdet(matrix+det_offset*tf.expand_dims(tf.eye(num_model),0)) # batch_size X 1, 1-D
# return all_log_det
#TODO: also a function with global logdet:
def | ():
pass
#TODO: this is a local logdet loss
def log_det_cuda(y_pred, y_class_true, args, neglect = 255): # We need to max Diversity for each class
# TODO: we should first down sampling before move on (like dropout 99%)
delta_det = 1e-3
drop_ratio = 1 - args.drop_ratio
# y_true need to be one hot
# print('class num', y_pred.size(1))
# mark_neglect = torch.ones_like(y_class_true) * neglect == y_class_true
if torch.cuda.is_available():
mark_neglect = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * neglect == y_class_true
else:
mark_neglect = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(
1) * neglect == y_class_true
# print("mask", torch.max(mark_neglect))
y_class_true = y_class_true * (1 - mark_neglect.long())
# print('max 18? Yes, it is , starting from 0 then 18', y_class_true.max())
y_class_true = y_class_true * (1 - mark_neglect.long()) + (y_pred.size(1)) * mark_neglect.long() # we put 18 + 1 as the neglect class
# print('lab', torch.max(y_class_true))
# print(y_pred.size(1)+1)
if torch.cuda.is_available():
y_class_true = y_class_true.cuda()
y_true = torch.nn.functional.one_hot(y_class_true, y_pred.size(1)+1)
# print('one hot size', y_true.size())
y_true = y_true[:,:,:,:y_pred.size(1)]
# print('one hot size', y_true.size(), 'pred size', y_pred.size())
if torch.cuda.is_available():
non_max_mask = (torch.ones_like(y_true).cuda() - y_true) != torch.zeros_like(y_true).cuda()
else:
non_max_mask = (torch.ones_like(y_true) - y_true) != torch.zeros_like(y_true)
non_max_mask = torch.transpose(non_max_mask, dim0=1, dim1=3).float()
# print('non_max_mask', non_max_mask.size())
# print("HERE", non_max_mask.shape, y_pred.shape)
mask_non_y_pred = non_max_mask * y_pred
mask_non_y_pred = mask_non_y_pred.view(-1, mask_non_y_pred.size(1), mask_non_y_pred.size(2) * mask_non_y_pred.size(3))
mask_non_y_pred = torch.transpose(mask_non_y_pred, dim0=1, dim1=2) # batch * num_pixel * class_num
#TODO: now , we look at the diversity within the groundtruth class
# class_categories = set(y_class_true.cpu().numpy())
det_loss = 0
together = False
if together:
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
drop_approximate = drop_approximate.repeat(1, 1, mask_non_y_pred.size(2))
select_element = mask_non_y_pred[drop_approximate]
element_same_class = select_element.view(-1, y_pred.size(1))
element_same_class = element_same_class / ((torch.sum(element_same_class ** 2, dim=1).unsqueeze(
1)+ epsilon) ** 0.5) # pixel_num * fea_len TODO: need epsilon or result in NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): # TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
## print("logdet this", logdet_loss)
det_loss = logdet_loss
else:
for each_category in range(y_pred.size(1)):
# ind_select = torch.ones_like(y_class_true).cuda() * each_category == y_class_true
if torch.cuda.is_available():
ind_select = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
else:
ind_select = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
# print(ind_select.size())
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(ind_select.size()).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(ind_select.size()).uniform_() > drop_ratio
ind_select = ind_select * drop_approximate
# If the class exist in the input
if torch.sum(ind_select) > 0:
# print('each cate', each_category)
flat_ind_select = ind_select.view(-1, ind_select.size(1)*ind_select.size(2)) # batch * num_pixel
flat_ind_select = flat_ind_select.unsqueeze(2) # batch * num_pixel * 1
flat_ind_select = flat_ind_select.repeat(1, 1, mask_non_y_pred.size(2))
batch_wise = True
if batch_wise:
# iterating over each batch # can be replace with a batch global one, will check the running time
for batch_i in range(flat_ind_select.size(0)):
mask_non_y_pred_b = mask_non_y_pred[batch_i]
flat_ind_select_b = flat_ind_select[batch_i]
if torch.sum(flat_ind_select_b)==0:
continue
element_same_class = mask_non_y_pred_b[flat_ind_select_b] # selecting the predict score only for that "category" class;
# we expect the length shrink
# size: batch * num_pixel * feature_length
# TODO: check how to reshape this back !I have checked, this is correct.
element_same_class = element_same_class.view(-1, y_pred.size(1))
# print('after reshape', element_same_class)
# print("\nELEMENT_SAME_CLASS2 ","#blank elem",np.isnan(element_same_class.clone().detach().numpy()).sum(),"#0s ",len(np.where(element_same_class.clone().detach().numpy() ==0.)[0]),np.max(element_same_class.clone().detach().numpy()), np.min(element_same_class.clone().detach().numpy()))
# TODO: Normalize the score feature vector for each pixel
#TODO: it is crucial to add epsilon inside the root operation follows in order to prevent NAN during BP
# So I think norm should be preferred
element_same_class = element_same_class / (((torch.sum(element_same_class ** 2, dim=1).unsqueeze(1))+ epsilon) ** 0.5 + epsilon) # pixel_num * fea_len
# element_same_class = element_same_class / torch.norm(element_same_class, dim=2, keepdim=True)
matrix = torch.mm(element_same_class, torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): #TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
det_loss += logdet_loss
else:
element_same_class = mask_non_y_pred[flat_ind_select]
#TODO: check how to reshape this back !!!!!!!!!
element_same_class = element_same_class.view(-1, y_pred.size(1))
element_same_class = element_same_class / (torch.norm(element_same_class, dim=1, keepdim=True) + epsilon) #TODO: divided , you need epsilon to prevent NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # pixel_num * pixel_num
#
logdet_loss = torch.logdet(matrix + delta_det * torch.eye(matrix.size(1).cuda()))
det_loss += logdet_loss
return det_loss
#TODO: Can we use GAN to align the variance? Use Pang et al? Maximum the overall entropy?
def train_seg_reg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
# print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained and args.loading:
print('args.pretrained', args.pretrained)
single_model.load_state_dict(torch.load(args.pretrained))
out_dir = 'output/{}_{:03d}_{}'.format(args.arch, 0, args.phase)
model = torch.nn.DataParallel(single_model)
criterion = nn.NLLLoss(ignore_index=255)
if torch.cuda.is_available():
model.cuda()
criterion.cuda()
# Data loading code
info = get_info(args.dataset)
train_loader = get_loader(args, "train")
val_loader = get_loader(args, "val", out_name=True)
adv_val_loader = get_loader(args, "adv_val", out_name=True)
# define loss function (criterion) and pptimizer
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# Backup files before resuming/starting training
backup_output_dir = args.backup_output_dir
os.makedirs(backup_output_dir, exist_ok=True)
if os.path.exists(backup_output_dir):
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
experiment_backup_folder = "reg_adv_train_" + args.arch + "_" + args.dataset + "_" + timestamp
experiment_backup_folder = os.path.join(backup_output_dir, experiment_backup_folder)
print(experiment_backup_folder)
shutil.copytree('.', experiment_backup_folder, ignore=include_patterns('*.py', '*.json'))
# Logging with TensorBoard
log_dir = os.path.join(experiment_backup_folder, "runs")
val_writer = SummaryWriter(log_dir=log_dir + '/validate_runs/')
writer = SummaryWriter(log_dir=log_dir)
fh = logging.FileHandler(experiment_backup_folder + '/log.txt')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# optionally resume from a checkpoint
if args.resume:
print("resuming", args.resume)
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion,args=args,log_dir=experiment_backup_folder, eval_score=accuracy, info=info)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, args.dataset,
eval_score=accuracy)
# evaluate on validation set
prec = validate(val_loader, model, criterion, args=args,log_dir=experiment_backup_folder, eval_score=accuracy,
info=info, writer=val_writer, epoch=epoch)
if epoch % args.val_freq:
from learning.validate import validate_adv
mAP = validate_adv(adv_val_loader, model, args.classes, save_vis=True,
has_gt=True, output_dir=out_dir, downsize_scale=args.downsize_scale,
args=args, info=info, writer=val_writer, epoch=epoch)
logger.info('adv mAP: %f', mAP)
# writer.add_scalar('Reg_Adv_Validate/prec', prec, epoch)
writer.add_scalar('Reg_Adv_Validate/mAP', mAP, epoch)
is_best = prec > best_prec1
if is_best:
best_prec1 = max(mAP, best_prec1)
save_model_path = os.path.join(experiment_backup_folder, 'savecheckpoint')
os.makedirs(save_model_path, exist_ok=True)
checkpoint_path = os.path.join(save_model_path, 'checkpoint_latest.pth.tar')
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path, save_model_path=save_model_path)
if (epoch + 1) % 1 == 0:
# history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
history_path = os.path.join(save_model_path, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
writer.close()
def reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, dataset,
eval_score=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
reg_losses = AverageMeter()
entropy_losses = AverageMeter()
classify_losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
print("Standard Training + Regularization" if not args.adv_train else "Adversarial Training + Regularization")
for i, (input, target) in enumerate(train_loader):
# print('target', target)
# measure data loading time
data_time.update(time.time() - end)
if args.adv_train:
adv_img = PGD_attack(input, target, model, criterion, args.epsilon, args.steps, args.dataset,
args.step_size, info, using_noise=True)
else:
adv_img = input
# input = input.cuda()
# print('diff', (adv_img.data-input) / (args.epsilon))
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
# TODO: adversarial training
clean_input = input
input = adv_img.data
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda()
clean_input = clean_input.cuda()
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target, requires_grad=False)
clean_input_var = torch.autograd.Variable(clean_input)
# compute output
# output = model(input_var)[0]
output, _, softmax_output = model(input_var)
cla_loss = criterion(output, target_var)
# TODO: we are random sampling this, we may want to do several times
reg_loss_total = 0
for iii in range(args.MC_times):
reg_loss = log_det_cuda(softmax_output, target_var, args) #y_true, y_pred, y_class_true
reg_loss_total += reg_loss
reg_term = args.reg_lambda / args.MC_times * reg_loss_total
# entropy_loss = reg_term
# entropy_loss = 0
entropy_loss = args.entropy_lambda * ensemble_entropy(softmax_output)
loss = cla_loss - reg_term - entropy_loss
losses.update(loss.data.item(), input.size(0))
classify_losses.update(cla_loss.data.item(), input.size(0))
reg_losses.update(reg_term.data.item(), input.size(0))
entropy_losses.update(entropy_loss.data.item(), input.size(0))
if eval_score is not None:
if target_var.size(0)>0:
scores.update(eval_score(output, target_var), input.size(0))
else:
print("0 size!")
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (args.debug):
print_freq = 10
if i % (args.print_freq // args.batch_size) == 0:
# Convert target and prediction to rgb images to visualise
class_prediction = torch.argmax(output, dim=1)
decoded_target = decode_segmap(target[0].cpu().numpy() if torch.cuda.is_available() else target[0].numpy(),
dataset)
decoded_target = np.moveaxis(decoded_target, 2, 0)
decoded_class_prediction = decode_segmap(
class_prediction[0].cpu().numpy() if torch.cuda.is_available() else class_prediction[0].numpy(),
dataset)
decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Xent Loss {classify_losses.val:.4f} ({classify_losses.avg:.4f})\t'
'Reg Loss {reg_losses.val:.4f} ({reg_losses.avg:.4f})\t'
'Entropy Loss {entro_losses.val:.4f} ({entro_losses.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, classify_losses=classify_losses, reg_losses=reg_losses,
entro_losses = entropy_losses, loss=losses, top1=scores))
# compute output for the case when clean image is passed.
clean_output = model(clean_input_var)[0]
clean_loss = criterion(clean_output, target_var)
if eval_score is not None:
if target_var.size(0) > 0:
clean_score = eval_score(clean_output, target_var)
else:
print("0 size!")
clean_score = 0
writer.add_image('Image/adv image ', back_transform(input_var, info)[0])
writer.add_image('Image/clean image ', back_transform(clean_input_var, info)[0])
writer.add_image('Image/image target ', decoded_target)
writer.add_image('Image/image prediction ', decoded_class_prediction)
writer.add_scalar('Reg_Adv_Train/Score', scores.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Loss', losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Score', clean_score, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Loss', clean_loss, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Classify_Loss', classify_losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Reg_Loss', reg_losses.val, epoch * len(train_loader) + i)
if args.debug and i==(args.print_freq // args.batch_size)*10: #breaking after 10 images.
break
# break
| log_det_global_cuda | identifier_name |
reg_adv_train_loop.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import models.drn as drn
from models.DRNSeg import DRNSeg
from models.FCN32s import FCN32s
import data_transforms as transforms
import json
import math
import os
from os.path import exists, join, split
import threading
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from learning.utils_learn import *
from learning.dataloader import SegList, SegListMS, get_info, get_loader
import logging
from learning.validate import validate
import data_transforms as transforms
from dataloaders.utils import decode_segmap
from torch.utils.tensorboard import SummaryWriter
from learning.attack import PGD_attack
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
log_epsilon = 1e-20
epsilon = 1e-10
def ensemble_entropy(y_pred):
num_pred = y_pred.size(2) * y_pred.size(3)
entropy_type = "sum_entropy"
if entropy_type == "all_entropy":
flag_pred = y_pred.view(y_pred.size(0), -1)
entropy = torch.sum(-flag_pred * torch.log(flag_pred + log_epsilon)) #TODO: here, even sum the batch dim
elif entropy_type == "sum_entropy": # Pang et al
sum_score = torch.sum(torch.sum(y_pred, dim=3), dim=2) / num_pred
entropy = torch.sum(-sum_score * torch.log(sum_score + log_epsilon))
# print("\n_____Debugging entropy", "y_pred.shape", y_pred.shape,
# # "sum", torch.sum(y_pred, axis=3).shape,
# # "sum of sum", torch.sum(torch.sum(y_pred, axis=3), axis=2).shape,
# "num_pred", num_pred,
# "sum_score", sum_score,
# "entropy", entropy, torch.log(sum_score + log_epsilon),"\n",
# # "individual elements", np.where(y_pred.detach().numpy() < 0)[0].shape,
# "\n\n")
elif entropy_type == "mutual_info": # borrow the mutual information idea, where it is the total entropy - mean of individual entropy
pass
return entropy
# def log_det(y_true, y_pred, num_model=FLAGS.num_models):
# bool_R_y_true = tf.not_equal(tf.ones_like(y_true) - y_true, zero) # batch_size X (num_class X num_models), 2-D
# mask_non_y_pred = tf.boolean_mask(y_pred, bool_R_y_true) # batch_size X (num_class-1) X num_models, 1-D
# mask_non_y_pred = tf.reshape(mask_non_y_pred, [-1, num_model, num_classes-1]) # batch_size X num_model X (num_class-1), 3-D
# mask_non_y_pred = mask_non_y_pred / tf.norm(mask_non_y_pred, axis=2, keepdims=True) # batch_size X num_model X (num_class-1), 3-D
# matrix = tf.matmul(mask_non_y_pred, tf.transpose(mask_non_y_pred, perm=[0, 2, 1])) # batch_size X num_model X num_model, 3-D
# all_log_det = tf.linalg.logdet(matrix+det_offset*tf.expand_dims(tf.eye(num_model),0)) # batch_size X 1, 1-D
# return all_log_det
#TODO: also a function with global logdet:
def log_det_global_cuda():
pass
#TODO: this is a local logdet loss
def log_det_cuda(y_pred, y_class_true, args, neglect = 255): # We need to max Diversity for each class
# TODO: we should first down sampling before move on (like dropout 99%)
delta_det = 1e-3
drop_ratio = 1 - args.drop_ratio
# y_true need to be one hot
# print('class num', y_pred.size(1))
# mark_neglect = torch.ones_like(y_class_true) * neglect == y_class_true
if torch.cuda.is_available():
mark_neglect = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * neglect == y_class_true
else:
mark_neglect = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(
1) * neglect == y_class_true
# print("mask", torch.max(mark_neglect))
y_class_true = y_class_true * (1 - mark_neglect.long())
# print('max 18? Yes, it is , starting from 0 then 18', y_class_true.max())
y_class_true = y_class_true * (1 - mark_neglect.long()) + (y_pred.size(1)) * mark_neglect.long() # we put 18 + 1 as the neglect class
# print('lab', torch.max(y_class_true))
# print(y_pred.size(1)+1)
if torch.cuda.is_available():
y_class_true = y_class_true.cuda()
y_true = torch.nn.functional.one_hot(y_class_true, y_pred.size(1)+1)
# print('one hot size', y_true.size())
y_true = y_true[:,:,:,:y_pred.size(1)]
# print('one hot size', y_true.size(), 'pred size', y_pred.size())
if torch.cuda.is_available():
non_max_mask = (torch.ones_like(y_true).cuda() - y_true) != torch.zeros_like(y_true).cuda()
else:
non_max_mask = (torch.ones_like(y_true) - y_true) != torch.zeros_like(y_true)
non_max_mask = torch.transpose(non_max_mask, dim0=1, dim1=3).float()
# print('non_max_mask', non_max_mask.size())
# print("HERE", non_max_mask.shape, y_pred.shape)
mask_non_y_pred = non_max_mask * y_pred
mask_non_y_pred = mask_non_y_pred.view(-1, mask_non_y_pred.size(1), mask_non_y_pred.size(2) * mask_non_y_pred.size(3))
mask_non_y_pred = torch.transpose(mask_non_y_pred, dim0=1, dim1=2) # batch * num_pixel * class_num
#TODO: now , we look at the diversity within the groundtruth class
# class_categories = set(y_class_true.cpu().numpy())
det_loss = 0
together = False
if together:
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
drop_approximate = drop_approximate.repeat(1, 1, mask_non_y_pred.size(2))
select_element = mask_non_y_pred[drop_approximate]
element_same_class = select_element.view(-1, y_pred.size(1))
element_same_class = element_same_class / ((torch.sum(element_same_class ** 2, dim=1).unsqueeze(
1)+ epsilon) ** 0.5) # pixel_num * fea_len TODO: need epsilon or result in NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): # TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
## print("logdet this", logdet_loss)
det_loss = logdet_loss
else:
for each_category in range(y_pred.size(1)):
# ind_select = torch.ones_like(y_class_true).cuda() * each_category == y_class_true
if torch.cuda.is_available():
ind_select = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
else:
ind_select = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
# print(ind_select.size())
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(ind_select.size()).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(ind_select.size()).uniform_() > drop_ratio
ind_select = ind_select * drop_approximate
# If the class exist in the input
if torch.sum(ind_select) > 0:
# print('each cate', each_category)
flat_ind_select = ind_select.view(-1, ind_select.size(1)*ind_select.size(2)) # batch * num_pixel
flat_ind_select = flat_ind_select.unsqueeze(2) # batch * num_pixel * 1
flat_ind_select = flat_ind_select.repeat(1, 1, mask_non_y_pred.size(2))
batch_wise = True
if batch_wise:
# iterating over each batch # can be replace with a batch global one, will check the running time
for batch_i in range(flat_ind_select.size(0)):
mask_non_y_pred_b = mask_non_y_pred[batch_i]
flat_ind_select_b = flat_ind_select[batch_i]
if torch.sum(flat_ind_select_b)==0:
continue
element_same_class = mask_non_y_pred_b[flat_ind_select_b] # selecting the predict score only for that "category" class;
# we expect the length shrink
# size: batch * num_pixel * feature_length
# TODO: check how to reshape this back !I have checked, this is correct.
element_same_class = element_same_class.view(-1, y_pred.size(1))
# print('after reshape', element_same_class)
# print("\nELEMENT_SAME_CLASS2 ","#blank elem",np.isnan(element_same_class.clone().detach().numpy()).sum(),"#0s ",len(np.where(element_same_class.clone().detach().numpy() ==0.)[0]),np.max(element_same_class.clone().detach().numpy()), np.min(element_same_class.clone().detach().numpy()))
# TODO: Normalize the score feature vector for each pixel
#TODO: it is crucial to add epsilon inside the root operation follows in order to prevent NAN during BP
# So I think norm should be preferred
element_same_class = element_same_class / (((torch.sum(element_same_class ** 2, dim=1).unsqueeze(1))+ epsilon) ** 0.5 + epsilon) # pixel_num * fea_len
# element_same_class = element_same_class / torch.norm(element_same_class, dim=2, keepdim=True)
matrix = torch.mm(element_same_class, torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): #TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
det_loss += logdet_loss
else:
|
return det_loss
#TODO: Can we use GAN to align the variance? Use Pang et al? Maximum the overall entropy?
def train_seg_reg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
# print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained and args.loading:
print('args.pretrained', args.pretrained)
single_model.load_state_dict(torch.load(args.pretrained))
out_dir = 'output/{}_{:03d}_{}'.format(args.arch, 0, args.phase)
model = torch.nn.DataParallel(single_model)
criterion = nn.NLLLoss(ignore_index=255)
if torch.cuda.is_available():
model.cuda()
criterion.cuda()
# Data loading code
info = get_info(args.dataset)
train_loader = get_loader(args, "train")
val_loader = get_loader(args, "val", out_name=True)
adv_val_loader = get_loader(args, "adv_val", out_name=True)
# define loss function (criterion) and pptimizer
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# Backup files before resuming/starting training
backup_output_dir = args.backup_output_dir
os.makedirs(backup_output_dir, exist_ok=True)
if os.path.exists(backup_output_dir):
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
experiment_backup_folder = "reg_adv_train_" + args.arch + "_" + args.dataset + "_" + timestamp
experiment_backup_folder = os.path.join(backup_output_dir, experiment_backup_folder)
print(experiment_backup_folder)
shutil.copytree('.', experiment_backup_folder, ignore=include_patterns('*.py', '*.json'))
# Logging with TensorBoard
log_dir = os.path.join(experiment_backup_folder, "runs")
val_writer = SummaryWriter(log_dir=log_dir + '/validate_runs/')
writer = SummaryWriter(log_dir=log_dir)
fh = logging.FileHandler(experiment_backup_folder + '/log.txt')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# optionally resume from a checkpoint
if args.resume:
print("resuming", args.resume)
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion,args=args,log_dir=experiment_backup_folder, eval_score=accuracy, info=info)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, args.dataset,
eval_score=accuracy)
# evaluate on validation set
prec = validate(val_loader, model, criterion, args=args,log_dir=experiment_backup_folder, eval_score=accuracy,
info=info, writer=val_writer, epoch=epoch)
if epoch % args.val_freq:
from learning.validate import validate_adv
mAP = validate_adv(adv_val_loader, model, args.classes, save_vis=True,
has_gt=True, output_dir=out_dir, downsize_scale=args.downsize_scale,
args=args, info=info, writer=val_writer, epoch=epoch)
logger.info('adv mAP: %f', mAP)
# writer.add_scalar('Reg_Adv_Validate/prec', prec, epoch)
writer.add_scalar('Reg_Adv_Validate/mAP', mAP, epoch)
is_best = prec > best_prec1
if is_best:
best_prec1 = max(mAP, best_prec1)
save_model_path = os.path.join(experiment_backup_folder, 'savecheckpoint')
os.makedirs(save_model_path, exist_ok=True)
checkpoint_path = os.path.join(save_model_path, 'checkpoint_latest.pth.tar')
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path, save_model_path=save_model_path)
if (epoch + 1) % 1 == 0:
# history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
history_path = os.path.join(save_model_path, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
writer.close()
def reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, dataset,
eval_score=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
reg_losses = AverageMeter()
entropy_losses = AverageMeter()
classify_losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
print("Standard Training + Regularization" if not args.adv_train else "Adversarial Training + Regularization")
for i, (input, target) in enumerate(train_loader):
# print('target', target)
# measure data loading time
data_time.update(time.time() - end)
if args.adv_train:
adv_img = PGD_attack(input, target, model, criterion, args.epsilon, args.steps, args.dataset,
args.step_size, info, using_noise=True)
else:
adv_img = input
# input = input.cuda()
# print('diff', (adv_img.data-input) / (args.epsilon))
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
# TODO: adversarial training
clean_input = input
input = adv_img.data
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda()
clean_input = clean_input.cuda()
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target, requires_grad=False)
clean_input_var = torch.autograd.Variable(clean_input)
# compute output
# output = model(input_var)[0]
output, _, softmax_output = model(input_var)
cla_loss = criterion(output, target_var)
# TODO: we are random sampling this, we may want to do several times
reg_loss_total = 0
for iii in range(args.MC_times):
reg_loss = log_det_cuda(softmax_output, target_var, args) #y_true, y_pred, y_class_true
reg_loss_total += reg_loss
reg_term = args.reg_lambda / args.MC_times * reg_loss_total
# entropy_loss = reg_term
# entropy_loss = 0
entropy_loss = args.entropy_lambda * ensemble_entropy(softmax_output)
loss = cla_loss - reg_term - entropy_loss
losses.update(loss.data.item(), input.size(0))
classify_losses.update(cla_loss.data.item(), input.size(0))
reg_losses.update(reg_term.data.item(), input.size(0))
entropy_losses.update(entropy_loss.data.item(), input.size(0))
if eval_score is not None:
if target_var.size(0)>0:
scores.update(eval_score(output, target_var), input.size(0))
else:
print("0 size!")
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (args.debug):
print_freq = 10
if i % (args.print_freq // args.batch_size) == 0:
# Convert target and prediction to rgb images to visualise
class_prediction = torch.argmax(output, dim=1)
decoded_target = decode_segmap(target[0].cpu().numpy() if torch.cuda.is_available() else target[0].numpy(),
dataset)
decoded_target = np.moveaxis(decoded_target, 2, 0)
decoded_class_prediction = decode_segmap(
class_prediction[0].cpu().numpy() if torch.cuda.is_available() else class_prediction[0].numpy(),
dataset)
decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Xent Loss {classify_losses.val:.4f} ({classify_losses.avg:.4f})\t'
'Reg Loss {reg_losses.val:.4f} ({reg_losses.avg:.4f})\t'
'Entropy Loss {entro_losses.val:.4f} ({entro_losses.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, classify_losses=classify_losses, reg_losses=reg_losses,
entro_losses = entropy_losses, loss=losses, top1=scores))
# compute output for the case when clean image is passed.
clean_output = model(clean_input_var)[0]
clean_loss = criterion(clean_output, target_var)
if eval_score is not None:
if target_var.size(0) > 0:
clean_score = eval_score(clean_output, target_var)
else:
print("0 size!")
clean_score = 0
writer.add_image('Image/adv image ', back_transform(input_var, info)[0])
writer.add_image('Image/clean image ', back_transform(clean_input_var, info)[0])
writer.add_image('Image/image target ', decoded_target)
writer.add_image('Image/image prediction ', decoded_class_prediction)
writer.add_scalar('Reg_Adv_Train/Score', scores.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Loss', losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Score', clean_score, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Loss', clean_loss, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Classify_Loss', classify_losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Reg_Loss', reg_losses.val, epoch * len(train_loader) + i)
if args.debug and i==(args.print_freq // args.batch_size)*10: #breaking after 10 images.
break
# break
| element_same_class = mask_non_y_pred[flat_ind_select]
#TODO: check how to reshape this back !!!!!!!!!
element_same_class = element_same_class.view(-1, y_pred.size(1))
element_same_class = element_same_class / (torch.norm(element_same_class, dim=1, keepdim=True) + epsilon) #TODO: divided , you need epsilon to prevent NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # pixel_num * pixel_num
#
logdet_loss = torch.logdet(matrix + delta_det * torch.eye(matrix.size(1).cuda()))
det_loss += logdet_loss | conditional_block |
reg_adv_train_loop.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import models.drn as drn
from models.DRNSeg import DRNSeg
from models.FCN32s import FCN32s
import data_transforms as transforms
import json
import math
import os
from os.path import exists, join, split
import threading
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from learning.utils_learn import *
from learning.dataloader import SegList, SegListMS, get_info, get_loader
import logging
from learning.validate import validate
import data_transforms as transforms
from dataloaders.utils import decode_segmap
from torch.utils.tensorboard import SummaryWriter
from learning.attack import PGD_attack
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
log_epsilon = 1e-20
epsilon = 1e-10
def ensemble_entropy(y_pred):
|
# def log_det(y_true, y_pred, num_model=FLAGS.num_models):
# bool_R_y_true = tf.not_equal(tf.ones_like(y_true) - y_true, zero) # batch_size X (num_class X num_models), 2-D
# mask_non_y_pred = tf.boolean_mask(y_pred, bool_R_y_true) # batch_size X (num_class-1) X num_models, 1-D
# mask_non_y_pred = tf.reshape(mask_non_y_pred, [-1, num_model, num_classes-1]) # batch_size X num_model X (num_class-1), 3-D
# mask_non_y_pred = mask_non_y_pred / tf.norm(mask_non_y_pred, axis=2, keepdims=True) # batch_size X num_model X (num_class-1), 3-D
# matrix = tf.matmul(mask_non_y_pred, tf.transpose(mask_non_y_pred, perm=[0, 2, 1])) # batch_size X num_model X num_model, 3-D
# all_log_det = tf.linalg.logdet(matrix+det_offset*tf.expand_dims(tf.eye(num_model),0)) # batch_size X 1, 1-D
# return all_log_det
#TODO: also a function with global logdet:
def log_det_global_cuda():
pass
#TODO: this is a local logdet loss
def log_det_cuda(y_pred, y_class_true, args, neglect = 255): # We need to max Diversity for each class
# TODO: we should first down sampling before move on (like dropout 99%)
delta_det = 1e-3
drop_ratio = 1 - args.drop_ratio
# y_true need to be one hot
# print('class num', y_pred.size(1))
# mark_neglect = torch.ones_like(y_class_true) * neglect == y_class_true
if torch.cuda.is_available():
mark_neglect = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * neglect == y_class_true
else:
mark_neglect = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(
1) * neglect == y_class_true
# print("mask", torch.max(mark_neglect))
y_class_true = y_class_true * (1 - mark_neglect.long())
# print('max 18? Yes, it is , starting from 0 then 18', y_class_true.max())
y_class_true = y_class_true * (1 - mark_neglect.long()) + (y_pred.size(1)) * mark_neglect.long() # we put 18 + 1 as the neglect class
# print('lab', torch.max(y_class_true))
# print(y_pred.size(1)+1)
if torch.cuda.is_available():
y_class_true = y_class_true.cuda()
y_true = torch.nn.functional.one_hot(y_class_true, y_pred.size(1)+1)
# print('one hot size', y_true.size())
y_true = y_true[:,:,:,:y_pred.size(1)]
# print('one hot size', y_true.size(), 'pred size', y_pred.size())
if torch.cuda.is_available():
non_max_mask = (torch.ones_like(y_true).cuda() - y_true) != torch.zeros_like(y_true).cuda()
else:
non_max_mask = (torch.ones_like(y_true) - y_true) != torch.zeros_like(y_true)
non_max_mask = torch.transpose(non_max_mask, dim0=1, dim1=3).float()
# print('non_max_mask', non_max_mask.size())
# print("HERE", non_max_mask.shape, y_pred.shape)
mask_non_y_pred = non_max_mask * y_pred
mask_non_y_pred = mask_non_y_pred.view(-1, mask_non_y_pred.size(1), mask_non_y_pred.size(2) * mask_non_y_pred.size(3))
mask_non_y_pred = torch.transpose(mask_non_y_pred, dim0=1, dim1=2) # batch * num_pixel * class_num
#TODO: now , we look at the diversity within the groundtruth class
# class_categories = set(y_class_true.cpu().numpy())
det_loss = 0
together = False
if together:
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(mask_non_y_pred.size(0), mask_non_y_pred.size(1), 1).uniform_() > drop_ratio
drop_approximate = drop_approximate.repeat(1, 1, mask_non_y_pred.size(2))
select_element = mask_non_y_pred[drop_approximate]
element_same_class = select_element.view(-1, y_pred.size(1))
element_same_class = element_same_class / ((torch.sum(element_same_class ** 2, dim=1).unsqueeze(
1)+ epsilon) ** 0.5) # pixel_num * fea_len TODO: need epsilon or result in NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): # TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
## print("logdet this", logdet_loss)
det_loss = logdet_loss
else:
for each_category in range(y_pred.size(1)):
# ind_select = torch.ones_like(y_class_true).cuda() * each_category == y_class_true
if torch.cuda.is_available():
ind_select = torch.cuda.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
else:
ind_select = torch.LongTensor(y_class_true.size(0), y_class_true.size(1), y_class_true.size(2)).fill_(1) * each_category == y_class_true
# print(ind_select.size())
if torch.cuda.is_available():
drop_approximate = torch.cuda.FloatTensor(ind_select.size()).uniform_() > drop_ratio
else:
drop_approximate = torch.FloatTensor(ind_select.size()).uniform_() > drop_ratio
ind_select = ind_select * drop_approximate
# If the class exist in the input
if torch.sum(ind_select) > 0:
# print('each cate', each_category)
flat_ind_select = ind_select.view(-1, ind_select.size(1)*ind_select.size(2)) # batch * num_pixel
flat_ind_select = flat_ind_select.unsqueeze(2) # batch * num_pixel * 1
flat_ind_select = flat_ind_select.repeat(1, 1, mask_non_y_pred.size(2))
batch_wise = True
if batch_wise:
# iterating over each batch # can be replace with a batch global one, will check the running time
for batch_i in range(flat_ind_select.size(0)):
mask_non_y_pred_b = mask_non_y_pred[batch_i]
flat_ind_select_b = flat_ind_select[batch_i]
if torch.sum(flat_ind_select_b)==0:
continue
element_same_class = mask_non_y_pred_b[flat_ind_select_b] # selecting the predict score only for that "category" class;
# we expect the length shrink
# size: batch * num_pixel * feature_length
# TODO: check how to reshape this back !I have checked, this is correct.
element_same_class = element_same_class.view(-1, y_pred.size(1))
# print('after reshape', element_same_class)
# print("\nELEMENT_SAME_CLASS2 ","#blank elem",np.isnan(element_same_class.clone().detach().numpy()).sum(),"#0s ",len(np.where(element_same_class.clone().detach().numpy() ==0.)[0]),np.max(element_same_class.clone().detach().numpy()), np.min(element_same_class.clone().detach().numpy()))
# TODO: Normalize the score feature vector for each pixel
#TODO: it is crucial to add epsilon inside the root operation follows in order to prevent NAN during BP
# So I think norm should be preferred
element_same_class = element_same_class / (((torch.sum(element_same_class ** 2, dim=1).unsqueeze(1))+ epsilon) ** 0.5 + epsilon) # pixel_num * fea_len
# element_same_class = element_same_class / torch.norm(element_same_class, dim=2, keepdim=True)
matrix = torch.mm(element_same_class, torch.transpose(element_same_class, dim0=0, dim1=1)) # batch * pixel_num * pixel_num
if torch.cuda.is_available(): #TODO: can be written in a shorter way
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)).cuda())
else:
logdet_loss = torch.logdet(matrix[0] + delta_det * torch.eye(matrix.size(1)))
det_loss += logdet_loss
else:
element_same_class = mask_non_y_pred[flat_ind_select]
#TODO: check how to reshape this back !!!!!!!!!
element_same_class = element_same_class.view(-1, y_pred.size(1))
element_same_class = element_same_class / (torch.norm(element_same_class, dim=1, keepdim=True) + epsilon) #TODO: divided , you need epsilon to prevent NAN
matrix = torch.mm(element_same_class,
torch.transpose(element_same_class, dim0=0, dim1=1)) # pixel_num * pixel_num
#
logdet_loss = torch.logdet(matrix + delta_det * torch.eye(matrix.size(1).cuda()))
det_loss += logdet_loss
return det_loss
#TODO: Can we use GAN to align the variance? Use Pang et al? Maximum the overall entropy?
def train_seg_reg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
# print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained and args.loading:
print('args.pretrained', args.pretrained)
single_model.load_state_dict(torch.load(args.pretrained))
out_dir = 'output/{}_{:03d}_{}'.format(args.arch, 0, args.phase)
model = torch.nn.DataParallel(single_model)
criterion = nn.NLLLoss(ignore_index=255)
if torch.cuda.is_available():
model.cuda()
criterion.cuda()
# Data loading code
info = get_info(args.dataset)
train_loader = get_loader(args, "train")
val_loader = get_loader(args, "val", out_name=True)
adv_val_loader = get_loader(args, "adv_val", out_name=True)
# define loss function (criterion) and pptimizer
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# Backup files before resuming/starting training
backup_output_dir = args.backup_output_dir
os.makedirs(backup_output_dir, exist_ok=True)
if os.path.exists(backup_output_dir):
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
experiment_backup_folder = "reg_adv_train_" + args.arch + "_" + args.dataset + "_" + timestamp
experiment_backup_folder = os.path.join(backup_output_dir, experiment_backup_folder)
print(experiment_backup_folder)
shutil.copytree('.', experiment_backup_folder, ignore=include_patterns('*.py', '*.json'))
# Logging with TensorBoard
log_dir = os.path.join(experiment_backup_folder, "runs")
val_writer = SummaryWriter(log_dir=log_dir + '/validate_runs/')
writer = SummaryWriter(log_dir=log_dir)
fh = logging.FileHandler(experiment_backup_folder + '/log.txt')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# optionally resume from a checkpoint
if args.resume:
print("resuming", args.resume)
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion,args=args,log_dir=experiment_backup_folder, eval_score=accuracy, info=info)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, args.dataset,
eval_score=accuracy)
# evaluate on validation set
prec = validate(val_loader, model, criterion, args=args,log_dir=experiment_backup_folder, eval_score=accuracy,
info=info, writer=val_writer, epoch=epoch)
if epoch % args.val_freq:
from learning.validate import validate_adv
mAP = validate_adv(adv_val_loader, model, args.classes, save_vis=True,
has_gt=True, output_dir=out_dir, downsize_scale=args.downsize_scale,
args=args, info=info, writer=val_writer, epoch=epoch)
logger.info('adv mAP: %f', mAP)
# writer.add_scalar('Reg_Adv_Validate/prec', prec, epoch)
writer.add_scalar('Reg_Adv_Validate/mAP', mAP, epoch)
is_best = prec > best_prec1
if is_best:
best_prec1 = max(mAP, best_prec1)
save_model_path = os.path.join(experiment_backup_folder, 'savecheckpoint')
os.makedirs(save_model_path, exist_ok=True)
checkpoint_path = os.path.join(save_model_path, 'checkpoint_latest.pth.tar')
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path, save_model_path=save_model_path)
if (epoch + 1) % 1 == 0:
# history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
history_path = os.path.join(save_model_path, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
writer.close()
def reg_train(train_loader, model, criterion, optimizer, epoch, args, info, writer, dataset,
eval_score=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
reg_losses = AverageMeter()
entropy_losses = AverageMeter()
classify_losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
print("Standard Training + Regularization" if not args.adv_train else "Adversarial Training + Regularization")
for i, (input, target) in enumerate(train_loader):
# print('target', target)
# measure data loading time
data_time.update(time.time() - end)
if args.adv_train:
adv_img = PGD_attack(input, target, model, criterion, args.epsilon, args.steps, args.dataset,
args.step_size, info, using_noise=True)
else:
adv_img = input
# input = input.cuda()
# print('diff', (adv_img.data-input) / (args.epsilon))
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
# TODO: adversarial training
clean_input = input
input = adv_img.data
if torch.cuda.is_available():
input = input.cuda()
target = target.cuda()
clean_input = clean_input.cuda()
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target, requires_grad=False)
clean_input_var = torch.autograd.Variable(clean_input)
# compute output
# output = model(input_var)[0]
output, _, softmax_output = model(input_var)
cla_loss = criterion(output, target_var)
# TODO: we are random sampling this, we may want to do several times
reg_loss_total = 0
for iii in range(args.MC_times):
reg_loss = log_det_cuda(softmax_output, target_var, args) #y_true, y_pred, y_class_true
reg_loss_total += reg_loss
reg_term = args.reg_lambda / args.MC_times * reg_loss_total
# entropy_loss = reg_term
# entropy_loss = 0
entropy_loss = args.entropy_lambda * ensemble_entropy(softmax_output)
loss = cla_loss - reg_term - entropy_loss
losses.update(loss.data.item(), input.size(0))
classify_losses.update(cla_loss.data.item(), input.size(0))
reg_losses.update(reg_term.data.item(), input.size(0))
entropy_losses.update(entropy_loss.data.item(), input.size(0))
if eval_score is not None:
if target_var.size(0)>0:
scores.update(eval_score(output, target_var), input.size(0))
else:
print("0 size!")
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (args.debug):
print_freq = 10
if i % (args.print_freq // args.batch_size) == 0:
# Convert target and prediction to rgb images to visualise
class_prediction = torch.argmax(output, dim=1)
decoded_target = decode_segmap(target[0].cpu().numpy() if torch.cuda.is_available() else target[0].numpy(),
dataset)
decoded_target = np.moveaxis(decoded_target, 2, 0)
decoded_class_prediction = decode_segmap(
class_prediction[0].cpu().numpy() if torch.cuda.is_available() else class_prediction[0].numpy(),
dataset)
decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Xent Loss {classify_losses.val:.4f} ({classify_losses.avg:.4f})\t'
'Reg Loss {reg_losses.val:.4f} ({reg_losses.avg:.4f})\t'
'Entropy Loss {entro_losses.val:.4f} ({entro_losses.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, classify_losses=classify_losses, reg_losses=reg_losses,
entro_losses = entropy_losses, loss=losses, top1=scores))
# compute output for the case when clean image is passed.
clean_output = model(clean_input_var)[0]
clean_loss = criterion(clean_output, target_var)
if eval_score is not None:
if target_var.size(0) > 0:
clean_score = eval_score(clean_output, target_var)
else:
print("0 size!")
clean_score = 0
writer.add_image('Image/adv image ', back_transform(input_var, info)[0])
writer.add_image('Image/clean image ', back_transform(clean_input_var, info)[0])
writer.add_image('Image/image target ', decoded_target)
writer.add_image('Image/image prediction ', decoded_class_prediction)
writer.add_scalar('Reg_Adv_Train/Score', scores.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Loss', losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Score', clean_score, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Clean_Loss', clean_loss, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Classify_Loss', classify_losses.val, epoch * len(train_loader) + i)
writer.add_scalar('Reg_Adv_Train/Reg_Loss', reg_losses.val, epoch * len(train_loader) + i)
if args.debug and i==(args.print_freq // args.batch_size)*10: #breaking after 10 images.
break
# break
| num_pred = y_pred.size(2) * y_pred.size(3)
entropy_type = "sum_entropy"
if entropy_type == "all_entropy":
flag_pred = y_pred.view(y_pred.size(0), -1)
entropy = torch.sum(-flag_pred * torch.log(flag_pred + log_epsilon)) #TODO: here, even sum the batch dim
elif entropy_type == "sum_entropy": # Pang et al
sum_score = torch.sum(torch.sum(y_pred, dim=3), dim=2) / num_pred
entropy = torch.sum(-sum_score * torch.log(sum_score + log_epsilon))
# print("\n_____Debugging entropy", "y_pred.shape", y_pred.shape,
# # "sum", torch.sum(y_pred, axis=3).shape,
# # "sum of sum", torch.sum(torch.sum(y_pred, axis=3), axis=2).shape,
# "num_pred", num_pred,
# "sum_score", sum_score,
# "entropy", entropy, torch.log(sum_score + log_epsilon),"\n",
# # "individual elements", np.where(y_pred.detach().numpy() < 0)[0].shape,
# "\n\n")
elif entropy_type == "mutual_info": # borrow the mutual information idea, where it is the total entropy - mean of individual entropy
pass
return entropy | identifier_body |
resnet_trainer.py | #!/usr/bin/python
import os
import shutil
import time
from IPython.display import Image
# import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import os.path
import DataLoader
# from densenet_modified import *
import sys
# Trainer parameters
print_freq_epochs = 100
use_cuda = True
# Dataset Parameters
batch_size = 32
load_size = 256 | # Training parameters
# architecture = 'resnet34'
# architecture = 'vgg16_bn'
# architecture = 'dense'
lr = 0.1 # densenet default = 0.1,
lr_init = 0.1
momentum = 0.90 # densenet default = 0.9
weight_decay = 1e-3 # densenet default = 1e-4
num_epochs = 125
dummy_text_file = open("dummy_text.txt", "w")
def construct_dataloader_disk():
# Construct DataLoader
opt_data_train = {
#'data_h5': 'miniplaces_128_train.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/train.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': True
}
opt_data_val = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/val.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_train = DataLoader.DataLoaderDisk(**opt_data_train)
loader_val = DataLoader.DataLoaderDisk(**opt_data_val)
return (loader_train, loader_val)
def construct_dataloader_disk_trainval():
opt_data_trainval = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/trainval.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_valtrain = DataLoader.DataLoaderDisk(**opt_data_trainval)
return (loader_valtrain)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(lr, optimizer, epoch):
"""Calculates a learning rate of the initial LR decayed by 10 every 30 epochs"""
lr = lr_init * (0.1 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
# def adjust_learning_rate(lr, optimizer, epoch): # for densenet (201)
# """Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs"""
# lr = lr_init * (0.1 ** (epoch // 20)) * (0.1 ** (epoch // 50))
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# return lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def save_checkpoint(filename, model, state, is_best, epoch):
torch.save(state, "models/"+filename) #"densenet121__retraining.tar"
if is_best:
torch.save(model, "results/"+filename)
# train and validate methods adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
def train(train_loader, model, criterion, optimizer, epoch, text_file):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i in range(int(train_loader.size()/batch_size)):
input, target = train_loader.next_batch(batch_size)
target = target.long()
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, train_loader.size()/batch_size, batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
text_file.write(str(epoch)+str(",")+str(i)+str(",")+str(batch_time.val)+str(",")+str(data_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
def validate(val_loader, model, criterion, text_file, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i in range(int(val_loader.size()/batch_size)):
input, target = val_loader.next_batch(batch_size)
target = target.long()
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, val_loader.size()/batch_size, batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
text_file.write(str("val,")+str(epoch)+","+str(i)+str(",")+str(batch_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
return top5.avg
criterion = nn.CrossEntropyLoss()
if use_cuda:
criterion = criterion.cuda()
train_loader, val_loader = construct_dataloader_disk()
trainval_loader = construct_dataloader_disk_trainval()
def trainer(filename, lr, momentum, weight_decay):
# filename = "resnet34"
# model = models.__dict__[filename](num_classes=100, pretrained=False)
model = torch.load("results/"+filename+".pt")
if use_cuda:
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
best_prec5 = 70.0
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
for epoch in range(85,num_epochs):
# check for file
if not os.path.isfile(filename+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch)
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
print("First round of training finished")
model = torch.load("results/"+filename+".pt")
filename_old = filename
filename = filename+"_valtrained"
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
print("Training on validation set:")
for epoch in range(num_epochs,num_epochs+15):
# check for file
if not os.path.isfile(filename_old+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr) # questionable
# train for one epoch
train(trainval_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch) # pointless
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
return 0
trainer(str(sys.argv[1]),lr, momentum, weight_decay) | fine_size = 224
c = 3
data_mean = np.asarray([0.45834960097,0.44674252445,0.41352266842])
| random_line_split |
resnet_trainer.py | #!/usr/bin/python
import os
import shutil
import time
from IPython.display import Image
# import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import os.path
import DataLoader
# from densenet_modified import *
import sys
# Trainer parameters
print_freq_epochs = 100
use_cuda = True
# Dataset Parameters
batch_size = 32
load_size = 256
fine_size = 224
c = 3
data_mean = np.asarray([0.45834960097,0.44674252445,0.41352266842])
# Training parameters
# architecture = 'resnet34'
# architecture = 'vgg16_bn'
# architecture = 'dense'
lr = 0.1 # densenet default = 0.1,
lr_init = 0.1
momentum = 0.90 # densenet default = 0.9
weight_decay = 1e-3 # densenet default = 1e-4
num_epochs = 125
dummy_text_file = open("dummy_text.txt", "w")
def construct_dataloader_disk():
# Construct DataLoader
opt_data_train = {
#'data_h5': 'miniplaces_128_train.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/train.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': True
}
opt_data_val = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/val.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_train = DataLoader.DataLoaderDisk(**opt_data_train)
loader_val = DataLoader.DataLoaderDisk(**opt_data_val)
return (loader_train, loader_val)
def construct_dataloader_disk_trainval():
opt_data_trainval = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/trainval.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_valtrain = DataLoader.DataLoaderDisk(**opt_data_trainval)
return (loader_valtrain)
class AverageMeter(object):
|
def adjust_learning_rate(lr, optimizer, epoch):
"""Calculates a learning rate of the initial LR decayed by 10 every 30 epochs"""
lr = lr_init * (0.1 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
# def adjust_learning_rate(lr, optimizer, epoch): # for densenet (201)
# """Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs"""
# lr = lr_init * (0.1 ** (epoch // 20)) * (0.1 ** (epoch // 50))
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# return lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def save_checkpoint(filename, model, state, is_best, epoch):
torch.save(state, "models/"+filename) #"densenet121__retraining.tar"
if is_best:
torch.save(model, "results/"+filename)
# train and validate methods adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
def train(train_loader, model, criterion, optimizer, epoch, text_file):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i in range(int(train_loader.size()/batch_size)):
input, target = train_loader.next_batch(batch_size)
target = target.long()
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, train_loader.size()/batch_size, batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
text_file.write(str(epoch)+str(",")+str(i)+str(",")+str(batch_time.val)+str(",")+str(data_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
def validate(val_loader, model, criterion, text_file, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i in range(int(val_loader.size()/batch_size)):
input, target = val_loader.next_batch(batch_size)
target = target.long()
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, val_loader.size()/batch_size, batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
text_file.write(str("val,")+str(epoch)+","+str(i)+str(",")+str(batch_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
return top5.avg
criterion = nn.CrossEntropyLoss()
if use_cuda:
criterion = criterion.cuda()
train_loader, val_loader = construct_dataloader_disk()
trainval_loader = construct_dataloader_disk_trainval()
def trainer(filename, lr, momentum, weight_decay):
# filename = "resnet34"
# model = models.__dict__[filename](num_classes=100, pretrained=False)
model = torch.load("results/"+filename+".pt")
if use_cuda:
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
best_prec5 = 70.0
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
for epoch in range(85,num_epochs):
# check for file
if not os.path.isfile(filename+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch)
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
print("First round of training finished")
model = torch.load("results/"+filename+".pt")
filename_old = filename
filename = filename+"_valtrained"
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
print("Training on validation set:")
for epoch in range(num_epochs,num_epochs+15):
# check for file
if not os.path.isfile(filename_old+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr) # questionable
# train for one epoch
train(trainval_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch) # pointless
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
return 0
trainer(str(sys.argv[1]),lr, momentum, weight_decay)
| """Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count | identifier_body |
resnet_trainer.py | #!/usr/bin/python
import os
import shutil
import time
from IPython.display import Image
# import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import os.path
import DataLoader
# from densenet_modified import *
import sys
# Trainer parameters
print_freq_epochs = 100
use_cuda = True
# Dataset Parameters
batch_size = 32
load_size = 256
fine_size = 224
c = 3
data_mean = np.asarray([0.45834960097,0.44674252445,0.41352266842])
# Training parameters
# architecture = 'resnet34'
# architecture = 'vgg16_bn'
# architecture = 'dense'
lr = 0.1 # densenet default = 0.1,
lr_init = 0.1
momentum = 0.90 # densenet default = 0.9
weight_decay = 1e-3 # densenet default = 1e-4
num_epochs = 125
dummy_text_file = open("dummy_text.txt", "w")
def construct_dataloader_disk():
# Construct DataLoader
opt_data_train = {
#'data_h5': 'miniplaces_128_train.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/train.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': True
}
opt_data_val = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/val.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_train = DataLoader.DataLoaderDisk(**opt_data_train)
loader_val = DataLoader.DataLoaderDisk(**opt_data_val)
return (loader_train, loader_val)
def construct_dataloader_disk_trainval():
opt_data_trainval = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/trainval.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_valtrain = DataLoader.DataLoaderDisk(**opt_data_trainval)
return (loader_valtrain)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(lr, optimizer, epoch):
"""Calculates a learning rate of the initial LR decayed by 10 every 30 epochs"""
lr = lr_init * (0.1 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
# def adjust_learning_rate(lr, optimizer, epoch): # for densenet (201)
# """Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs"""
# lr = lr_init * (0.1 ** (epoch // 20)) * (0.1 ** (epoch // 50))
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# return lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def save_checkpoint(filename, model, state, is_best, epoch):
torch.save(state, "models/"+filename) #"densenet121__retraining.tar"
if is_best:
torch.save(model, "results/"+filename)
# train and validate methods adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
def train(train_loader, model, criterion, optimizer, epoch, text_file):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i in range(int(train_loader.size()/batch_size)):
input, target = train_loader.next_batch(batch_size)
target = target.long()
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, train_loader.size()/batch_size, batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
text_file.write(str(epoch)+str(",")+str(i)+str(",")+str(batch_time.val)+str(",")+str(data_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
def validate(val_loader, model, criterion, text_file, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i in range(int(val_loader.size()/batch_size)):
input, target = val_loader.next_batch(batch_size)
target = target.long()
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, val_loader.size()/batch_size, batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
text_file.write(str("val,")+str(epoch)+","+str(i)+str(",")+str(batch_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
return top5.avg
criterion = nn.CrossEntropyLoss()
if use_cuda:
criterion = criterion.cuda()
train_loader, val_loader = construct_dataloader_disk()
trainval_loader = construct_dataloader_disk_trainval()
def | (filename, lr, momentum, weight_decay):
# filename = "resnet34"
# model = models.__dict__[filename](num_classes=100, pretrained=False)
model = torch.load("results/"+filename+".pt")
if use_cuda:
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
best_prec5 = 70.0
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
for epoch in range(85,num_epochs):
# check for file
if not os.path.isfile(filename+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch)
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
print("First round of training finished")
model = torch.load("results/"+filename+".pt")
filename_old = filename
filename = filename+"_valtrained"
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
print("Training on validation set:")
for epoch in range(num_epochs,num_epochs+15):
# check for file
if not os.path.isfile(filename_old+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr) # questionable
# train for one epoch
train(trainval_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch) # pointless
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
return 0
trainer(str(sys.argv[1]),lr, momentum, weight_decay)
| trainer | identifier_name |
resnet_trainer.py | #!/usr/bin/python
import os
import shutil
import time
from IPython.display import Image
# import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import os.path
import DataLoader
# from densenet_modified import *
import sys
# Trainer parameters
print_freq_epochs = 100
use_cuda = True
# Dataset Parameters
batch_size = 32
load_size = 256
fine_size = 224
c = 3
data_mean = np.asarray([0.45834960097,0.44674252445,0.41352266842])
# Training parameters
# architecture = 'resnet34'
# architecture = 'vgg16_bn'
# architecture = 'dense'
lr = 0.1 # densenet default = 0.1,
lr_init = 0.1
momentum = 0.90 # densenet default = 0.9
weight_decay = 1e-3 # densenet default = 1e-4
num_epochs = 125
dummy_text_file = open("dummy_text.txt", "w")
def construct_dataloader_disk():
# Construct DataLoader
opt_data_train = {
#'data_h5': 'miniplaces_128_train.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/train.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': True
}
opt_data_val = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/val.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_train = DataLoader.DataLoaderDisk(**opt_data_train)
loader_val = DataLoader.DataLoaderDisk(**opt_data_val)
return (loader_train, loader_val)
def construct_dataloader_disk_trainval():
opt_data_trainval = {
#'data_h5': 'miniplaces_128_val.h5',
'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY
'data_list': '../../data/trainval.txt', # MODIFY PATH ACCORDINGLY
'load_size': load_size,
'fine_size': fine_size,
'data_mean': data_mean,
'randomize': False
}
loader_valtrain = DataLoader.DataLoaderDisk(**opt_data_trainval)
return (loader_valtrain)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(lr, optimizer, epoch):
"""Calculates a learning rate of the initial LR decayed by 10 every 30 epochs"""
lr = lr_init * (0.1 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
# def adjust_learning_rate(lr, optimizer, epoch): # for densenet (201)
# """Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs"""
# lr = lr_init * (0.1 ** (epoch // 20)) * (0.1 ** (epoch // 50))
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# return lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def save_checkpoint(filename, model, state, is_best, epoch):
torch.save(state, "models/"+filename) #"densenet121__retraining.tar"
if is_best:
torch.save(model, "results/"+filename)
# train and validate methods adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
def train(train_loader, model, criterion, optimizer, epoch, text_file):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i in range(int(train_loader.size()/batch_size)):
input, target = train_loader.next_batch(batch_size)
target = target.long()
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, train_loader.size()/batch_size, batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
text_file.write(str(epoch)+str(",")+str(i)+str(",")+str(batch_time.val)+str(",")+str(data_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
def validate(val_loader, model, criterion, text_file, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i in range(int(val_loader.size()/batch_size)):
input, target = val_loader.next_batch(batch_size)
target = target.long()
if use_cuda:
target = target.cuda(async=True)
input = input.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
target_var = target_var.long()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq_epochs == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, val_loader.size()/batch_size, batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
text_file.write(str("val,")+str(epoch)+","+str(i)+str(",")+str(batch_time.val)+str(",")+str(losses.avg)+str(",")+str(top1.avg)+str(",")+str(top5.avg)+"\n")
return top5.avg
criterion = nn.CrossEntropyLoss()
if use_cuda:
|
train_loader, val_loader = construct_dataloader_disk()
trainval_loader = construct_dataloader_disk_trainval()
def trainer(filename, lr, momentum, weight_decay):
# filename = "resnet34"
# model = models.__dict__[filename](num_classes=100, pretrained=False)
model = torch.load("results/"+filename+".pt")
if use_cuda:
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
best_prec5 = 70.0
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
for epoch in range(85,num_epochs):
# check for file
if not os.path.isfile(filename+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch)
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
print("First round of training finished")
model = torch.load("results/"+filename+".pt")
filename_old = filename
filename = filename+"_valtrained"
text_file_train = open("results/"+filename+".txt", "w")
text_file_val = open("results/"+filename+".txt", "w")
print("Training on validation set:")
for epoch in range(num_epochs,num_epochs+15):
# check for file
if not os.path.isfile(filename_old+".txt"):
break
lr = adjust_learning_rate(lr, optimizer, epoch) # turn off for Adam
print("learning rate:", lr) # questionable
# train for one epoch
train(trainval_loader, model, criterion, optimizer, epoch, text_file_train)
# evaluate on validation set
prec5 = validate(val_loader, model, criterion, text_file_val, epoch) # pointless
# remember best prec@1 and save checkpoint
is_best = prec5 > best_prec5
best_prec5 = max(prec5, best_prec5)
save_checkpoint(filename+".pt", model, {
'epoch': epoch + 1,
'arch': filename,
'state_dict': model.state_dict(),
'best_prec5': best_prec5,
'optimizer' : optimizer.state_dict(),
}, is_best, epoch)
return 0
trainer(str(sys.argv[1]),lr, momentum, weight_decay)
| criterion = criterion.cuda() | conditional_block |
token.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package token
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
"time"
"github.com/apigee/apigee-remote-service-cli/cmd/provision"
"github.com/apigee/apigee-remote-service-cli/shared"
"github.com/lestrrat-go/jwx/jwa"
"github.com/lestrrat-go/jwx/jwk"
"github.com/lestrrat-go/jwx/jws"
"github.com/lestrrat-go/jwx/jwt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
)
const (
tokenURLFormat = "%s/token" // RemoteServiceProxyURL
certsURLFormat = "%s/certs" // RemoteServiceProxyURL
rotateURLFormat = "%s/rotate" // RemoteServiceProxyURL
clientCredentialsGrant = "client_credentials"
policySecretNameFormat = "%s-%s-policy-secret"
commonName = "apigee-remote-service"
orgName = "Google LLC"
// hybrid forces specific file extensions! https://docs.apigee.com/hybrid/v1.2/k8s-secrets
jwksSecretKey = "remote-service.crt" // obviously not a .crt, but hybrid will treat as blob
keySecretKey = "remote-service.key"
kidSecretKey = "remote-service.properties"
kidSecretPropFormat = "kid=%s" // KID
)
type token struct {
*shared.RootArgs
clientID string
clientSecret string
file string
keyID string
certExpirationInYears int
certKeyStrength int
namespace string
truncate int
}
// Cmd returns base command
func Cmd(rootArgs *shared.RootArgs, printf shared.FormatFn) *cobra.Command {
t := &token{RootArgs: rootArgs}
c := &cobra.Command{
Use: "token",
Short: "JWT Token Utilities",
Long: "JWT Token Utilities",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
return rootArgs.Resolve(true, true)
},
}
c.AddCommand(cmdCreateToken(t, printf))
c.AddCommand(cmdInspectToken(t, printf))
c.AddCommand(cmdRotateCert(t, printf))
c.AddCommand(cmdCreateSecret(t, printf))
return c
}
func cmdCreateToken(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "create",
Short: "Create a new OAuth token",
Long: "Create a new OAuth token",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
token, err := t.createToken(printf)
if err != nil {
return errors.Wrap(err, "creating token")
}
printf(token)
return nil
},
}
c.Flags().StringVarP(&t.clientID, "id", "i", "", "client id")
c.Flags().StringVarP(&t.clientSecret, "secret", "s", "", "client secret")
c.MarkFlagRequired("id")
c.MarkFlagRequired("secret")
return c
}
func cmdInspectToken(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "inspect",
Short: "Inspect a JWT token",
Long: "Inspect a JWT token",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
err := t.inspectToken(cmd.InOrStdin(), printf)
if err != nil {
return errors.Wrap(err, "inspecting token")
}
return nil
},
}
c.Flags().StringVarP(&t.file, "file", "f", "", "token file (default: use stdin)")
return c
}
func cmdCreateSecret(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "create-secret",
Short: "create Kubernetes CRDs for JWT tokens (hybrid only)",
Long: "Creates a new Kubernetes Secret CRD for JWT tokens, maintains prior cert(s) for rotation.",
Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, _ []string) {
if t.ServerConfig != nil {
t.clientID = t.ServerConfig.Tenant.Key
t.clientSecret = t.ServerConfig.Tenant.Secret
}
t.keyID = time.Now().Format(time.RFC3339)
t.createSecret(printf)
},
}
c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
c.Flags().StringVarP(&t.namespace, "namespace", "n", "apigee", "emit Secret in the specified namespace")
c.Flags().IntVarP(&t.truncate, "truncate", "", 2, "number of certs to keep in jwks")
return c
}
func cmdRotateCert(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "rotate-cert",
Short: "rotate JWT certificate (legacy or opdk)",
Long: "Deploys a new private and public key while maintaining the current public key for existing tokens (legacy or opdk).",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
if t.IsGCPManaged |
if t.ServerConfig != nil {
t.clientID = t.ServerConfig.Tenant.Key
t.clientSecret = t.ServerConfig.Tenant.Secret
}
missingFlagNames := []string{}
if t.clientID == "" {
missingFlagNames = append(missingFlagNames, "key")
}
if t.clientSecret == "" {
missingFlagNames = append(missingFlagNames, "secret")
}
if err := t.PrintMissingFlags(missingFlagNames); err != nil {
return err
}
t.rotateCert(printf)
return nil
},
}
c.Flags().StringVarP(&t.keyID, "kid", "", "1", "new key id")
c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
c.Flags().StringVarP(&t.clientID, "key", "k", "", "provision key")
c.Flags().StringVarP(&t.clientSecret, "secret", "s", "", "provision secret")
return c
}
func (t *token) createToken(printf shared.FormatFn) (string, error) {
tokenReq := &tokenRequest{
ClientID: t.clientID,
ClientSecret: t.clientSecret,
GrantType: clientCredentialsGrant,
}
body := new(bytes.Buffer)
json.NewEncoder(body).Encode(tokenReq)
tokenURL := fmt.Sprintf(tokenURLFormat, t.RemoteServiceProxyURL)
req, err := http.NewRequest(http.MethodPost, tokenURL, body)
if err != nil {
return "", errors.Wrap(err, "creating request")
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
var tokenRes tokenResponse
resp, err := t.Client.Do(req, &tokenRes)
if err != nil {
return "", errors.Wrap(err, "creating token")
}
defer resp.Body.Close()
return tokenRes.Token, nil
}
func (t *token) inspectToken(in io.Reader, printf shared.FormatFn) error {
var file = in
if t.file != "" {
var err error
file, err = os.Open(t.file)
if err != nil {
return errors.Wrapf(err, "opening file %s", t.file)
}
}
jwtBytes, err := ioutil.ReadAll(file)
if err != nil {
return errors.Wrap(err, "reading jwt token")
}
token, err := jwt.ParseBytes(jwtBytes)
if err != nil {
return errors.Wrap(err, "parsing jwt token")
}
jsonBytes, err := token.MarshalJSON()
if err != nil {
return errors.Wrap(err, "printing jwt token")
}
var prettyJSON bytes.Buffer
err = json.Indent(&prettyJSON, jsonBytes, "", "\t")
if err != nil {
return errors.Wrap(err, "printing jwt token")
}
printf(prettyJSON.String())
// verify JWT
printf("\nverifying...")
url := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
jwkSet, err := jwk.FetchHTTP(url)
if err != nil {
return errors.Wrap(err, "fetching certs")
}
if _, err = jws.VerifyWithJWKSet(jwtBytes, jwkSet, nil); err != nil {
return errors.Wrap(err, "verifying cert")
}
if err := token.Verify(jwt.WithAcceptableSkew(time.Minute)); err != nil {
printf("invalid token: %s", err)
return nil
}
printf("valid token")
return nil
}
// rotateCert is called by `token rotate-cert`
func (t *token) rotateCert(printf shared.FormatFn) error {
var verbosef = shared.NoPrintf
if t.Verbose {
verbosef = printf
}
verbosef("generating a new key and cert...")
cert, privateKey, err := provision.GenKeyCert(t.certKeyStrength, t.certExpirationInYears)
if err != nil {
return errors.Wrap(err, "generating cert")
}
rotateReq := rotateRequest{
PrivateKey: privateKey,
Certificate: cert,
KeyID: t.keyID,
}
verbosef("rotating certificate...")
body := new(bytes.Buffer)
err = json.NewEncoder(body).Encode(rotateReq)
if err != nil {
return errors.Wrap(err, "encoding")
}
rotateURL := fmt.Sprintf(rotateURLFormat, t.RemoteServiceProxyURL)
req, err := http.NewRequest(http.MethodPost, rotateURL, body)
if err != nil {
return errors.Wrap(err, "creating request")
}
req.SetBasicAuth(t.clientID, t.clientSecret)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
resp, err := t.Client.Do(req, nil)
if err != nil {
if resp != nil && resp.StatusCode == 401 {
return errors.Wrap(err, "authentication failed, check your key and secret")
}
return errors.Wrap(err, "rotating cert")
}
defer resp.Body.Close()
verbosef("new certificate:\n%s", cert)
verbosef("new private key:\n%s", privateKey)
printf("certificate successfully rotated")
return nil
}
// createSecret is called by `token create-secret`
func (t *token) createSecret(printf shared.FormatFn) error {
var verbosef = shared.NoPrintf
if t.Verbose {
verbosef = printf
}
jwkSet := &jwk.Set{}
verbosef("retrieving existing certificates...")
var err error
if t.truncate > 1 { // if 1, just skip old stuff
// old jwks
jwksURL := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
jwkSet, err = jwk.FetchHTTP(jwksURL)
if err != nil {
return errors.Wrap(err, "fetching jwks")
}
jwksBytes, err := json.Marshal(jwkSet)
if err != nil {
return errors.Wrap(err, "marshalling JSON")
}
verbosef("old jkws...\n%s", string(jwksBytes))
}
t.keyID = time.Now().Format(time.RFC3339)
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return errors.Wrap(err, "generating key")
}
// jwks
key, err := jwk.New(&privateKey.PublicKey)
if err != nil {
return errors.Wrap(err, "generating jwks")
}
key.Set(jwk.KeyIDKey, t.keyID)
key.Set(jwk.AlgorithmKey, jwa.RS256.String())
jwkSet.Keys = append(jwkSet.Keys, key)
// sort increasing and truncate
sort.Sort(sort.Reverse(byKID(jwkSet.Keys)))
if t.truncate > 0 {
jwkSet.Keys = jwkSet.Keys[:t.truncate]
}
jwksBytes, err := json.Marshal(jwkSet)
if err != nil {
return errors.Wrap(err, "marshalling JSON")
}
verbosef("new jkws...\n%s", string(jwksBytes))
// private key
keyBytes := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
// kid
kidProp := fmt.Sprintf(kidSecretPropFormat, t.keyID)
// Secret CRD
data := map[string]string{
jwksSecretKey: base64.StdEncoding.EncodeToString(jwksBytes),
keySecretKey: base64.StdEncoding.EncodeToString(keyBytes),
kidSecretKey: base64.StdEncoding.EncodeToString([]byte(kidProp)),
}
crd := shared.KubernetesCRD{
APIVersion: "v1",
Kind: "Secret",
Type: "Opaque",
Metadata: shared.Metadata{
Name: fmt.Sprintf(policySecretNameFormat, t.Org, t.Env),
Namespace: t.namespace,
},
Data: data,
}
// encode as YAML
var yamlBuffer bytes.Buffer
yamlEncoder := yaml.NewEncoder(&yamlBuffer)
yamlEncoder.SetIndent(2)
err = yamlEncoder.Encode(crd)
if err != nil {
return errors.Wrap(err, "encoding YAML")
}
printf("# Secret for apigee-remote-service-envoy")
printf("# generated by apigee-remote-service-cli provision on %s", time.Now().Format("2006-01-02 15:04:05"))
printf(yamlBuffer.String())
return nil
}
type rotateRequest struct {
PrivateKey string `json:"private_key"`
Certificate string `json:"certificate"`
KeyID string `json:"kid"`
}
type tokenRequest struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
GrantType string `json:"grant_type"`
}
type tokenResponse struct {
Token string `json:"token"`
}
type byKID []jwk.Key
func (a byKID) Len() int { return len(a) }
func (a byKID) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKID) Less(i, j int) bool { return a[i].KeyID() < a[j].KeyID() }
| {
return fmt.Errorf("only valid for legacy or hybrid, use create-secret for hybrid")
} | conditional_block |
token.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package token
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
"time"
"github.com/apigee/apigee-remote-service-cli/cmd/provision"
"github.com/apigee/apigee-remote-service-cli/shared"
"github.com/lestrrat-go/jwx/jwa"
"github.com/lestrrat-go/jwx/jwk"
"github.com/lestrrat-go/jwx/jws"
"github.com/lestrrat-go/jwx/jwt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
)
const (
tokenURLFormat = "%s/token" // RemoteServiceProxyURL
certsURLFormat = "%s/certs" // RemoteServiceProxyURL
rotateURLFormat = "%s/rotate" // RemoteServiceProxyURL
clientCredentialsGrant = "client_credentials"
policySecretNameFormat = "%s-%s-policy-secret"
commonName = "apigee-remote-service"
orgName = "Google LLC"
// hybrid forces specific file extensions! https://docs.apigee.com/hybrid/v1.2/k8s-secrets
jwksSecretKey = "remote-service.crt" // obviously not a .crt, but hybrid will treat as blob
keySecretKey = "remote-service.key"
kidSecretKey = "remote-service.properties"
kidSecretPropFormat = "kid=%s" // KID
)
type token struct {
*shared.RootArgs
clientID string
clientSecret string
file string
keyID string
certExpirationInYears int
certKeyStrength int
namespace string
truncate int
}
// Cmd returns base command
func Cmd(rootArgs *shared.RootArgs, printf shared.FormatFn) *cobra.Command {
t := &token{RootArgs: rootArgs}
c := &cobra.Command{
Use: "token",
Short: "JWT Token Utilities",
Long: "JWT Token Utilities",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
return rootArgs.Resolve(true, true)
},
}
c.AddCommand(cmdCreateToken(t, printf))
c.AddCommand(cmdInspectToken(t, printf))
c.AddCommand(cmdRotateCert(t, printf))
c.AddCommand(cmdCreateSecret(t, printf))
return c
}
func | (t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "create",
Short: "Create a new OAuth token",
Long: "Create a new OAuth token",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
token, err := t.createToken(printf)
if err != nil {
return errors.Wrap(err, "creating token")
}
printf(token)
return nil
},
}
c.Flags().StringVarP(&t.clientID, "id", "i", "", "client id")
c.Flags().StringVarP(&t.clientSecret, "secret", "s", "", "client secret")
c.MarkFlagRequired("id")
c.MarkFlagRequired("secret")
return c
}
func cmdInspectToken(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "inspect",
Short: "Inspect a JWT token",
Long: "Inspect a JWT token",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
err := t.inspectToken(cmd.InOrStdin(), printf)
if err != nil {
return errors.Wrap(err, "inspecting token")
}
return nil
},
}
c.Flags().StringVarP(&t.file, "file", "f", "", "token file (default: use stdin)")
return c
}
func cmdCreateSecret(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "create-secret",
Short: "create Kubernetes CRDs for JWT tokens (hybrid only)",
Long: "Creates a new Kubernetes Secret CRD for JWT tokens, maintains prior cert(s) for rotation.",
Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, _ []string) {
if t.ServerConfig != nil {
t.clientID = t.ServerConfig.Tenant.Key
t.clientSecret = t.ServerConfig.Tenant.Secret
}
t.keyID = time.Now().Format(time.RFC3339)
t.createSecret(printf)
},
}
c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
c.Flags().StringVarP(&t.namespace, "namespace", "n", "apigee", "emit Secret in the specified namespace")
c.Flags().IntVarP(&t.truncate, "truncate", "", 2, "number of certs to keep in jwks")
return c
}
func cmdRotateCert(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "rotate-cert",
Short: "rotate JWT certificate (legacy or opdk)",
Long: "Deploys a new private and public key while maintaining the current public key for existing tokens (legacy or opdk).",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
if t.IsGCPManaged {
return fmt.Errorf("only valid for legacy or hybrid, use create-secret for hybrid")
}
if t.ServerConfig != nil {
t.clientID = t.ServerConfig.Tenant.Key
t.clientSecret = t.ServerConfig.Tenant.Secret
}
missingFlagNames := []string{}
if t.clientID == "" {
missingFlagNames = append(missingFlagNames, "key")
}
if t.clientSecret == "" {
missingFlagNames = append(missingFlagNames, "secret")
}
if err := t.PrintMissingFlags(missingFlagNames); err != nil {
return err
}
t.rotateCert(printf)
return nil
},
}
c.Flags().StringVarP(&t.keyID, "kid", "", "1", "new key id")
c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
c.Flags().StringVarP(&t.clientID, "key", "k", "", "provision key")
c.Flags().StringVarP(&t.clientSecret, "secret", "s", "", "provision secret")
return c
}
func (t *token) createToken(printf shared.FormatFn) (string, error) {
tokenReq := &tokenRequest{
ClientID: t.clientID,
ClientSecret: t.clientSecret,
GrantType: clientCredentialsGrant,
}
body := new(bytes.Buffer)
json.NewEncoder(body).Encode(tokenReq)
tokenURL := fmt.Sprintf(tokenURLFormat, t.RemoteServiceProxyURL)
req, err := http.NewRequest(http.MethodPost, tokenURL, body)
if err != nil {
return "", errors.Wrap(err, "creating request")
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
var tokenRes tokenResponse
resp, err := t.Client.Do(req, &tokenRes)
if err != nil {
return "", errors.Wrap(err, "creating token")
}
defer resp.Body.Close()
return tokenRes.Token, nil
}
func (t *token) inspectToken(in io.Reader, printf shared.FormatFn) error {
var file = in
if t.file != "" {
var err error
file, err = os.Open(t.file)
if err != nil {
return errors.Wrapf(err, "opening file %s", t.file)
}
}
jwtBytes, err := ioutil.ReadAll(file)
if err != nil {
return errors.Wrap(err, "reading jwt token")
}
token, err := jwt.ParseBytes(jwtBytes)
if err != nil {
return errors.Wrap(err, "parsing jwt token")
}
jsonBytes, err := token.MarshalJSON()
if err != nil {
return errors.Wrap(err, "printing jwt token")
}
var prettyJSON bytes.Buffer
err = json.Indent(&prettyJSON, jsonBytes, "", "\t")
if err != nil {
return errors.Wrap(err, "printing jwt token")
}
printf(prettyJSON.String())
// verify JWT
printf("\nverifying...")
url := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
jwkSet, err := jwk.FetchHTTP(url)
if err != nil {
return errors.Wrap(err, "fetching certs")
}
if _, err = jws.VerifyWithJWKSet(jwtBytes, jwkSet, nil); err != nil {
return errors.Wrap(err, "verifying cert")
}
if err := token.Verify(jwt.WithAcceptableSkew(time.Minute)); err != nil {
printf("invalid token: %s", err)
return nil
}
printf("valid token")
return nil
}
// rotateCert is called by `token rotate-cert`
func (t *token) rotateCert(printf shared.FormatFn) error {
var verbosef = shared.NoPrintf
if t.Verbose {
verbosef = printf
}
verbosef("generating a new key and cert...")
cert, privateKey, err := provision.GenKeyCert(t.certKeyStrength, t.certExpirationInYears)
if err != nil {
return errors.Wrap(err, "generating cert")
}
rotateReq := rotateRequest{
PrivateKey: privateKey,
Certificate: cert,
KeyID: t.keyID,
}
verbosef("rotating certificate...")
body := new(bytes.Buffer)
err = json.NewEncoder(body).Encode(rotateReq)
if err != nil {
return errors.Wrap(err, "encoding")
}
rotateURL := fmt.Sprintf(rotateURLFormat, t.RemoteServiceProxyURL)
req, err := http.NewRequest(http.MethodPost, rotateURL, body)
if err != nil {
return errors.Wrap(err, "creating request")
}
req.SetBasicAuth(t.clientID, t.clientSecret)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
resp, err := t.Client.Do(req, nil)
if err != nil {
if resp != nil && resp.StatusCode == 401 {
return errors.Wrap(err, "authentication failed, check your key and secret")
}
return errors.Wrap(err, "rotating cert")
}
defer resp.Body.Close()
verbosef("new certificate:\n%s", cert)
verbosef("new private key:\n%s", privateKey)
printf("certificate successfully rotated")
return nil
}
// createSecret is called by `token create-secret`
func (t *token) createSecret(printf shared.FormatFn) error {
var verbosef = shared.NoPrintf
if t.Verbose {
verbosef = printf
}
jwkSet := &jwk.Set{}
verbosef("retrieving existing certificates...")
var err error
if t.truncate > 1 { // if 1, just skip old stuff
// old jwks
jwksURL := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
jwkSet, err = jwk.FetchHTTP(jwksURL)
if err != nil {
return errors.Wrap(err, "fetching jwks")
}
jwksBytes, err := json.Marshal(jwkSet)
if err != nil {
return errors.Wrap(err, "marshalling JSON")
}
verbosef("old jkws...\n%s", string(jwksBytes))
}
t.keyID = time.Now().Format(time.RFC3339)
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return errors.Wrap(err, "generating key")
}
// jwks
key, err := jwk.New(&privateKey.PublicKey)
if err != nil {
return errors.Wrap(err, "generating jwks")
}
key.Set(jwk.KeyIDKey, t.keyID)
key.Set(jwk.AlgorithmKey, jwa.RS256.String())
jwkSet.Keys = append(jwkSet.Keys, key)
// sort increasing and truncate
sort.Sort(sort.Reverse(byKID(jwkSet.Keys)))
if t.truncate > 0 {
jwkSet.Keys = jwkSet.Keys[:t.truncate]
}
jwksBytes, err := json.Marshal(jwkSet)
if err != nil {
return errors.Wrap(err, "marshalling JSON")
}
verbosef("new jkws...\n%s", string(jwksBytes))
// private key
keyBytes := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
// kid
kidProp := fmt.Sprintf(kidSecretPropFormat, t.keyID)
// Secret CRD
data := map[string]string{
jwksSecretKey: base64.StdEncoding.EncodeToString(jwksBytes),
keySecretKey: base64.StdEncoding.EncodeToString(keyBytes),
kidSecretKey: base64.StdEncoding.EncodeToString([]byte(kidProp)),
}
crd := shared.KubernetesCRD{
APIVersion: "v1",
Kind: "Secret",
Type: "Opaque",
Metadata: shared.Metadata{
Name: fmt.Sprintf(policySecretNameFormat, t.Org, t.Env),
Namespace: t.namespace,
},
Data: data,
}
// encode as YAML
var yamlBuffer bytes.Buffer
yamlEncoder := yaml.NewEncoder(&yamlBuffer)
yamlEncoder.SetIndent(2)
err = yamlEncoder.Encode(crd)
if err != nil {
return errors.Wrap(err, "encoding YAML")
}
printf("# Secret for apigee-remote-service-envoy")
printf("# generated by apigee-remote-service-cli provision on %s", time.Now().Format("2006-01-02 15:04:05"))
printf(yamlBuffer.String())
return nil
}
type rotateRequest struct {
PrivateKey string `json:"private_key"`
Certificate string `json:"certificate"`
KeyID string `json:"kid"`
}
type tokenRequest struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
GrantType string `json:"grant_type"`
}
type tokenResponse struct {
Token string `json:"token"`
}
type byKID []jwk.Key
func (a byKID) Len() int { return len(a) }
func (a byKID) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKID) Less(i, j int) bool { return a[i].KeyID() < a[j].KeyID() }
| cmdCreateToken | identifier_name |
token.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package token
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
"time"
"github.com/apigee/apigee-remote-service-cli/cmd/provision"
"github.com/apigee/apigee-remote-service-cli/shared"
"github.com/lestrrat-go/jwx/jwa"
"github.com/lestrrat-go/jwx/jwk"
"github.com/lestrrat-go/jwx/jws"
"github.com/lestrrat-go/jwx/jwt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
)
const (
tokenURLFormat = "%s/token" // RemoteServiceProxyURL
certsURLFormat = "%s/certs" // RemoteServiceProxyURL
rotateURLFormat = "%s/rotate" // RemoteServiceProxyURL
clientCredentialsGrant = "client_credentials"
policySecretNameFormat = "%s-%s-policy-secret"
commonName = "apigee-remote-service"
orgName = "Google LLC"
// hybrid forces specific file extensions! https://docs.apigee.com/hybrid/v1.2/k8s-secrets
jwksSecretKey = "remote-service.crt" // obviously not a .crt, but hybrid will treat as blob
keySecretKey = "remote-service.key"
kidSecretKey = "remote-service.properties"
kidSecretPropFormat = "kid=%s" // KID
)
type token struct {
*shared.RootArgs
clientID string
clientSecret string
file string
keyID string
certExpirationInYears int
certKeyStrength int
namespace string
truncate int
}
// Cmd returns the base `token` command with all of its subcommands
// (create, inspect, rotate-cert, create-secret) attached. Shared flag
// state lives in a single token struct passed to every subcommand.
func Cmd(rootArgs *shared.RootArgs, printf shared.FormatFn) *cobra.Command {
	t := &token{RootArgs: rootArgs}
	cmd := &cobra.Command{
		Use:   "token",
		Short: "JWT Token Utilities",
		Long:  "JWT Token Utilities",
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// resolve org/env and remote-service URLs before any subcommand runs
			return rootArgs.Resolve(true, true)
		},
	}
	for _, sub := range []*cobra.Command{
		cmdCreateToken(t, printf),
		cmdInspectToken(t, printf),
		cmdRotateCert(t, printf),
		cmdCreateSecret(t, printf),
	} {
		cmd.AddCommand(sub)
	}
	return cmd
}
// cmdCreateToken builds the `token create` subcommand, which obtains a
// new OAuth token from the remote service proxy and prints it.
func cmdCreateToken(t *token, printf shared.FormatFn) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "create",
		Short: "Create a new OAuth token",
		Long:  "Create a new OAuth token",
		Args:  cobra.NoArgs,
		RunE: func(c *cobra.Command, _ []string) error {
			tok, err := t.createToken(printf)
			if err != nil {
				return errors.Wrap(err, "creating token")
			}
			printf(tok)
			return nil
		},
	}
	flags := cmd.Flags()
	flags.StringVarP(&t.clientID, "id", "i", "", "client id")
	flags.StringVarP(&t.clientSecret, "secret", "s", "", "client secret")
	cmd.MarkFlagRequired("id")
	cmd.MarkFlagRequired("secret")
	return cmd
}
// cmdInspectToken builds the `token inspect` subcommand, which decodes a
// JWT (from --file or stdin), pretty-prints its claims, and verifies it
// against the remote service proxy's JWKS.
func cmdInspectToken(t *token, printf shared.FormatFn) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "inspect",
		Short: "Inspect a JWT token",
		Long:  "Inspect a JWT token",
		Args:  cobra.NoArgs,
		RunE: func(c *cobra.Command, _ []string) error {
			if err := t.inspectToken(c.InOrStdin(), printf); err != nil {
				return errors.Wrap(err, "inspecting token")
			}
			return nil
		},
	}
	cmd.Flags().StringVarP(&t.file, "file", "f", "", "token file (default: use stdin)")
	return cmd
}
// cmdCreateSecret builds the `token create-secret` subcommand (hybrid
// only), which emits a Kubernetes Secret CRD holding the JWKS, the new
// private key, and the key id.
func cmdCreateSecret(t *token, printf shared.FormatFn) *cobra.Command {
	c := &cobra.Command{
		Use:   "create-secret",
		Short: "create Kubernetes CRDs for JWT tokens (hybrid only)",
		Long:  "Creates a new Kubernetes Secret CRD for JWT tokens, maintains prior cert(s) for rotation.",
		Args:  cobra.NoArgs,
		// BUG FIX: this used Run and discarded createSecret's error;
		// RunE propagates it like the sibling subcommands do.
		RunE: func(cmd *cobra.Command, _ []string) error {
			if t.ServerConfig != nil {
				t.clientID = t.ServerConfig.Tenant.Key
				t.clientSecret = t.ServerConfig.Tenant.Secret
			}
			// key id is the creation timestamp; byKID sorting relies on
			// RFC3339 strings ordering chronologically
			t.keyID = time.Now().Format(time.RFC3339)
			return t.createSecret(printf)
		},
	}
	c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
	c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
	c.Flags().StringVarP(&t.namespace, "namespace", "n", "apigee", "emit Secret in the specified namespace")
	c.Flags().IntVarP(&t.truncate, "truncate", "", 2, "number of certs to keep in jwks")
	return c
}
// cmdRotateCert builds the `token rotate-cert` subcommand (legacy/opdk
// only), which deploys a new key pair while keeping the current public
// key valid for already-issued tokens.
func cmdRotateCert(t *token, printf shared.FormatFn) *cobra.Command {
	c := &cobra.Command{
		Use:   "rotate-cert",
		Short: "rotate JWT certificate (legacy or opdk)",
		Long:  "Deploys a new private and public key while maintaining the current public key for existing tokens (legacy or opdk).",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			if t.IsGCPManaged {
				return fmt.Errorf("only valid for legacy or hybrid, use create-secret for hybrid")
			}
			// config file values take precedence over flags
			if t.ServerConfig != nil {
				t.clientID = t.ServerConfig.Tenant.Key
				t.clientSecret = t.ServerConfig.Tenant.Secret
			}
			missingFlagNames := []string{}
			if t.clientID == "" {
				missingFlagNames = append(missingFlagNames, "key")
			}
			if t.clientSecret == "" {
				missingFlagNames = append(missingFlagNames, "secret")
			}
			if err := t.PrintMissingFlags(missingFlagNames); err != nil {
				return err
			}
			// BUG FIX: rotateCert's error was discarded and nil returned,
			// so a failed rotation exited with success status.
			return t.rotateCert(printf)
		},
	}
	c.Flags().StringVarP(&t.keyID, "kid", "", "1", "new key id")
	c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
	c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
	c.Flags().StringVarP(&t.clientID, "key", "k", "", "provision key")
	c.Flags().StringVarP(&t.clientSecret, "secret", "s", "", "provision secret")
	return c
}
// createToken requests a new OAuth token from the remote service proxy
// using the client_credentials grant and returns the raw token string.
// printf is unused here but kept for signature consistency with the
// other subcommand handlers.
func (t *token) createToken(printf shared.FormatFn) (string, error) {
	tokenReq := &tokenRequest{
		ClientID:     t.clientID,
		ClientSecret: t.clientSecret,
		GrantType:    clientCredentialsGrant,
	}
	body := new(bytes.Buffer)
	// BUG FIX: the encode error was previously discarded, which could
	// silently POST an empty/partial body.
	if err := json.NewEncoder(body).Encode(tokenReq); err != nil {
		return "", errors.Wrap(err, "encoding request")
	}
	tokenURL := fmt.Sprintf(tokenURLFormat, t.RemoteServiceProxyURL)
	req, err := http.NewRequest(http.MethodPost, tokenURL, body)
	if err != nil {
		return "", errors.Wrap(err, "creating request")
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	var tokenRes tokenResponse
	// t.Client.Do decodes the JSON response into tokenRes
	resp, err := t.Client.Do(req, &tokenRes)
	if err != nil {
		return "", errors.Wrap(err, "creating token")
	}
	defer resp.Body.Close()
	return tokenRes.Token, nil
}
// inspectToken reads a JWT from --file (or the supplied reader when no
// file is given), pretty-prints its JSON claims, verifies the signature
// against the proxy's JWKS, and checks the standard time claims with one
// minute of acceptable skew. A token that fails the time-claim check is
// reported via printf but is not treated as an error.
func (t *token) inspectToken(in io.Reader, printf shared.FormatFn) error {
	var file = in
	if t.file != "" {
		f, err := os.Open(t.file)
		if err != nil {
			return errors.Wrapf(err, "opening file %s", t.file)
		}
		// BUG FIX: the opened file was never closed (fd leak).
		defer f.Close()
		file = f
	}
	jwtBytes, err := ioutil.ReadAll(file)
	if err != nil {
		return errors.Wrap(err, "reading jwt token")
	}
	token, err := jwt.ParseBytes(jwtBytes)
	if err != nil {
		return errors.Wrap(err, "parsing jwt token")
	}
	jsonBytes, err := token.MarshalJSON()
	if err != nil {
		return errors.Wrap(err, "printing jwt token")
	}
	var prettyJSON bytes.Buffer
	if err = json.Indent(&prettyJSON, jsonBytes, "", "\t"); err != nil {
		return errors.Wrap(err, "printing jwt token")
	}
	printf(prettyJSON.String())
	// verify JWT
	printf("\nverifying...")
	url := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
	jwkSet, err := jwk.FetchHTTP(url)
	if err != nil {
		return errors.Wrap(err, "fetching certs")
	}
	// signature check against the proxy's published key set
	if _, err = jws.VerifyWithJWKSet(jwtBytes, jwkSet, nil); err != nil {
		return errors.Wrap(err, "verifying cert")
	}
	// time-claim check (exp/nbf/iat) — failure is informational only
	if err := token.Verify(jwt.WithAcceptableSkew(time.Minute)); err != nil {
		printf("invalid token: %s", err)
		return nil
	}
	printf("valid token")
	return nil
}
// rotateCert is called by `token rotate-cert`
// It generates a fresh key pair and cert locally, then POSTs them to the
// proxy's /rotate endpoint (basic auth with the provision key/secret).
// The proxy keeps the previous public key so outstanding tokens remain
// verifiable. Returns an error on generation/encoding/transport failure.
func (t *token) rotateCert(printf shared.FormatFn) error {
// verbose output is a no-op unless --verbose was given
var verbosef = shared.NoPrintf
if t.Verbose {
verbosef = printf
}
verbosef("generating a new key and cert...")
// strength/expiration come from the --strength and --years flags
cert, privateKey, err := provision.GenKeyCert(t.certKeyStrength, t.certExpirationInYears)
if err != nil {
return errors.Wrap(err, "generating cert")
}
rotateReq := rotateRequest{
PrivateKey: privateKey,
Certificate: cert,
KeyID: t.keyID,
}
verbosef("rotating certificate...")
body := new(bytes.Buffer)
err = json.NewEncoder(body).Encode(rotateReq)
if err != nil {
return errors.Wrap(err, "encoding")
}
rotateURL := fmt.Sprintf(rotateURLFormat, t.RemoteServiceProxyURL)
req, err := http.NewRequest(http.MethodPost, rotateURL, body)
if err != nil {
return errors.Wrap(err, "creating request")
}
req.SetBasicAuth(t.clientID, t.clientSecret)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
resp, err := t.Client.Do(req, nil)
if err != nil {
// a 401 usually means bad provision credentials; surface that hint
if resp != nil && resp.StatusCode == 401 {
return errors.Wrap(err, "authentication failed, check your key and secret")
}
return errors.Wrap(err, "rotating cert")
}
defer resp.Body.Close()
// only print the sensitive material in verbose mode
verbosef("new certificate:\n%s", cert)
verbosef("new private key:\n%s", privateKey)
printf("certificate successfully rotated")
return nil
}
// createSecret is called by `token create-secret`
// It fetches the proxy's existing JWKS (unless --truncate is 1), adds a
// freshly generated RSA key, keeps only the newest --truncate keys, and
// prints a Kubernetes Secret CRD (YAML) containing the JWKS, the new
// private key (PEM), and a kid properties entry — all base64-encoded as
// hybrid requires.
func (t *token) createSecret(printf shared.FormatFn) error {
	var verbosef = shared.NoPrintf
	if t.Verbose {
		verbosef = printf
	}
	jwkSet := &jwk.Set{}
	verbosef("retrieving existing certificates...")
	var err error
	if t.truncate > 1 { // if 1, just skip old stuff
		// old jwks
		jwksURL := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
		jwkSet, err = jwk.FetchHTTP(jwksURL)
		if err != nil {
			return errors.Wrap(err, "fetching jwks")
		}
		jwksBytes, err := json.Marshal(jwkSet)
		if err != nil {
			return errors.Wrap(err, "marshalling JSON")
		}
		verbosef("old jkws...\n%s", string(jwksBytes))
	}
	// key id is the creation timestamp so byKID sorts chronologically
	t.keyID = time.Now().Format(time.RFC3339)
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return errors.Wrap(err, "generating key")
	}
	// jwks entry for the new public key
	key, err := jwk.New(&privateKey.PublicKey)
	if err != nil {
		return errors.Wrap(err, "generating jwks")
	}
	key.Set(jwk.KeyIDKey, t.keyID)
	key.Set(jwk.AlgorithmKey, jwa.RS256.String())
	jwkSet.Keys = append(jwkSet.Keys, key)
	// newest keys first, then keep at most --truncate of them
	sort.Sort(sort.Reverse(byKID(jwkSet.Keys)))
	// BUG FIX: the old code sliced unconditionally and panicked when the
	// fetched set held fewer keys than --truncate.
	if t.truncate > 0 && len(jwkSet.Keys) > t.truncate {
		jwkSet.Keys = jwkSet.Keys[:t.truncate]
	}
	jwksBytes, err := json.Marshal(jwkSet)
	if err != nil {
		return errors.Wrap(err, "marshalling JSON")
	}
	verbosef("new jkws...\n%s", string(jwksBytes))
	// private key as PKCS#1 PEM
	keyBytes := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
	// kid properties entry
	kidProp := fmt.Sprintf(kidSecretPropFormat, t.keyID)
	// Secret CRD — hybrid requires these exact file-extension keys
	data := map[string]string{
		jwksSecretKey: base64.StdEncoding.EncodeToString(jwksBytes),
		keySecretKey:  base64.StdEncoding.EncodeToString(keyBytes),
		kidSecretKey:  base64.StdEncoding.EncodeToString([]byte(kidProp)),
	}
	crd := shared.KubernetesCRD{
		APIVersion: "v1",
		Kind:       "Secret",
		Type:       "Opaque",
		Metadata: shared.Metadata{
			Name:      fmt.Sprintf(policySecretNameFormat, t.Org, t.Env),
			Namespace: t.namespace,
		},
		Data: data,
	}
	// encode as YAML
	var yamlBuffer bytes.Buffer
	yamlEncoder := yaml.NewEncoder(&yamlBuffer)
	yamlEncoder.SetIndent(2)
	if err = yamlEncoder.Encode(crd); err != nil {
		return errors.Wrap(err, "encoding YAML")
	}
	printf("# Secret for apigee-remote-service-envoy")
	printf("# generated by apigee-remote-service-cli provision on %s", time.Now().Format("2006-01-02 15:04:05"))
	printf(yamlBuffer.String())
	return nil
}
// rotateRequest is the JSON payload POSTed to the proxy's /rotate
// endpoint when installing a newly generated key pair.
type rotateRequest struct {
PrivateKey string `json:"private_key"`
Certificate string `json:"certificate"`
KeyID string `json:"kid"`
}
// tokenRequest is the JSON payload for the proxy's /token endpoint
// (OAuth client_credentials grant).
type tokenRequest struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
GrantType string `json:"grant_type"`
}
// tokenResponse holds the token string returned by the /token endpoint.
type tokenResponse struct {
Token string `json:"token"`
}
type byKID []jwk.Key
func (a byKID) Len() int { return len(a) }
func (a byKID) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKID) Less(i, j int) bool | { return a[i].KeyID() < a[j].KeyID() } | identifier_body | |
token.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package token
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net/http"
"os" | "time"
"github.com/apigee/apigee-remote-service-cli/cmd/provision"
"github.com/apigee/apigee-remote-service-cli/shared"
"github.com/lestrrat-go/jwx/jwa"
"github.com/lestrrat-go/jwx/jwk"
"github.com/lestrrat-go/jwx/jws"
"github.com/lestrrat-go/jwx/jwt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
)
const (
tokenURLFormat = "%s/token" // RemoteServiceProxyURL
certsURLFormat = "%s/certs" // RemoteServiceProxyURL
rotateURLFormat = "%s/rotate" // RemoteServiceProxyURL
clientCredentialsGrant = "client_credentials"
policySecretNameFormat = "%s-%s-policy-secret"
commonName = "apigee-remote-service"
orgName = "Google LLC"
// hybrid forces specific file extensions! https://docs.apigee.com/hybrid/v1.2/k8s-secrets
jwksSecretKey = "remote-service.crt" // obviously not a .crt, but hybrid will treat as blob
keySecretKey = "remote-service.key"
kidSecretKey = "remote-service.properties"
kidSecretPropFormat = "kid=%s" // KID
)
type token struct {
*shared.RootArgs
clientID string
clientSecret string
file string
keyID string
certExpirationInYears int
certKeyStrength int
namespace string
truncate int
}
// Cmd returns base command
func Cmd(rootArgs *shared.RootArgs, printf shared.FormatFn) *cobra.Command {
t := &token{RootArgs: rootArgs}
c := &cobra.Command{
Use: "token",
Short: "JWT Token Utilities",
Long: "JWT Token Utilities",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
return rootArgs.Resolve(true, true)
},
}
c.AddCommand(cmdCreateToken(t, printf))
c.AddCommand(cmdInspectToken(t, printf))
c.AddCommand(cmdRotateCert(t, printf))
c.AddCommand(cmdCreateSecret(t, printf))
return c
}
func cmdCreateToken(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "create",
Short: "Create a new OAuth token",
Long: "Create a new OAuth token",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
token, err := t.createToken(printf)
if err != nil {
return errors.Wrap(err, "creating token")
}
printf(token)
return nil
},
}
c.Flags().StringVarP(&t.clientID, "id", "i", "", "client id")
c.Flags().StringVarP(&t.clientSecret, "secret", "s", "", "client secret")
c.MarkFlagRequired("id")
c.MarkFlagRequired("secret")
return c
}
func cmdInspectToken(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "inspect",
Short: "Inspect a JWT token",
Long: "Inspect a JWT token",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
err := t.inspectToken(cmd.InOrStdin(), printf)
if err != nil {
return errors.Wrap(err, "inspecting token")
}
return nil
},
}
c.Flags().StringVarP(&t.file, "file", "f", "", "token file (default: use stdin)")
return c
}
func cmdCreateSecret(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "create-secret",
Short: "create Kubernetes CRDs for JWT tokens (hybrid only)",
Long: "Creates a new Kubernetes Secret CRD for JWT tokens, maintains prior cert(s) for rotation.",
Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, _ []string) {
if t.ServerConfig != nil {
t.clientID = t.ServerConfig.Tenant.Key
t.clientSecret = t.ServerConfig.Tenant.Secret
}
t.keyID = time.Now().Format(time.RFC3339)
t.createSecret(printf)
},
}
c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
c.Flags().StringVarP(&t.namespace, "namespace", "n", "apigee", "emit Secret in the specified namespace")
c.Flags().IntVarP(&t.truncate, "truncate", "", 2, "number of certs to keep in jwks")
return c
}
func cmdRotateCert(t *token, printf shared.FormatFn) *cobra.Command {
c := &cobra.Command{
Use: "rotate-cert",
Short: "rotate JWT certificate (legacy or opdk)",
Long: "Deploys a new private and public key while maintaining the current public key for existing tokens (legacy or opdk).",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
if t.IsGCPManaged {
return fmt.Errorf("only valid for legacy or hybrid, use create-secret for hybrid")
}
if t.ServerConfig != nil {
t.clientID = t.ServerConfig.Tenant.Key
t.clientSecret = t.ServerConfig.Tenant.Secret
}
missingFlagNames := []string{}
if t.clientID == "" {
missingFlagNames = append(missingFlagNames, "key")
}
if t.clientSecret == "" {
missingFlagNames = append(missingFlagNames, "secret")
}
if err := t.PrintMissingFlags(missingFlagNames); err != nil {
return err
}
t.rotateCert(printf)
return nil
},
}
c.Flags().StringVarP(&t.keyID, "kid", "", "1", "new key id")
c.Flags().IntVarP(&t.certExpirationInYears, "years", "", 1, "number of years before the cert expires")
c.Flags().IntVarP(&t.certKeyStrength, "strength", "", 2048, "key strength")
c.Flags().StringVarP(&t.clientID, "key", "k", "", "provision key")
c.Flags().StringVarP(&t.clientSecret, "secret", "s", "", "provision secret")
return c
}
func (t *token) createToken(printf shared.FormatFn) (string, error) {
tokenReq := &tokenRequest{
ClientID: t.clientID,
ClientSecret: t.clientSecret,
GrantType: clientCredentialsGrant,
}
body := new(bytes.Buffer)
json.NewEncoder(body).Encode(tokenReq)
tokenURL := fmt.Sprintf(tokenURLFormat, t.RemoteServiceProxyURL)
req, err := http.NewRequest(http.MethodPost, tokenURL, body)
if err != nil {
return "", errors.Wrap(err, "creating request")
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
var tokenRes tokenResponse
resp, err := t.Client.Do(req, &tokenRes)
if err != nil {
return "", errors.Wrap(err, "creating token")
}
defer resp.Body.Close()
return tokenRes.Token, nil
}
func (t *token) inspectToken(in io.Reader, printf shared.FormatFn) error {
var file = in
if t.file != "" {
var err error
file, err = os.Open(t.file)
if err != nil {
return errors.Wrapf(err, "opening file %s", t.file)
}
}
jwtBytes, err := ioutil.ReadAll(file)
if err != nil {
return errors.Wrap(err, "reading jwt token")
}
token, err := jwt.ParseBytes(jwtBytes)
if err != nil {
return errors.Wrap(err, "parsing jwt token")
}
jsonBytes, err := token.MarshalJSON()
if err != nil {
return errors.Wrap(err, "printing jwt token")
}
var prettyJSON bytes.Buffer
err = json.Indent(&prettyJSON, jsonBytes, "", "\t")
if err != nil {
return errors.Wrap(err, "printing jwt token")
}
printf(prettyJSON.String())
// verify JWT
printf("\nverifying...")
url := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
jwkSet, err := jwk.FetchHTTP(url)
if err != nil {
return errors.Wrap(err, "fetching certs")
}
if _, err = jws.VerifyWithJWKSet(jwtBytes, jwkSet, nil); err != nil {
return errors.Wrap(err, "verifying cert")
}
if err := token.Verify(jwt.WithAcceptableSkew(time.Minute)); err != nil {
printf("invalid token: %s", err)
return nil
}
printf("valid token")
return nil
}
// rotateCert is called by `token rotate-cert`
func (t *token) rotateCert(printf shared.FormatFn) error {
var verbosef = shared.NoPrintf
if t.Verbose {
verbosef = printf
}
verbosef("generating a new key and cert...")
cert, privateKey, err := provision.GenKeyCert(t.certKeyStrength, t.certExpirationInYears)
if err != nil {
return errors.Wrap(err, "generating cert")
}
rotateReq := rotateRequest{
PrivateKey: privateKey,
Certificate: cert,
KeyID: t.keyID,
}
verbosef("rotating certificate...")
body := new(bytes.Buffer)
err = json.NewEncoder(body).Encode(rotateReq)
if err != nil {
return errors.Wrap(err, "encoding")
}
rotateURL := fmt.Sprintf(rotateURLFormat, t.RemoteServiceProxyURL)
req, err := http.NewRequest(http.MethodPost, rotateURL, body)
if err != nil {
return errors.Wrap(err, "creating request")
}
req.SetBasicAuth(t.clientID, t.clientSecret)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
resp, err := t.Client.Do(req, nil)
if err != nil {
if resp != nil && resp.StatusCode == 401 {
return errors.Wrap(err, "authentication failed, check your key and secret")
}
return errors.Wrap(err, "rotating cert")
}
defer resp.Body.Close()
verbosef("new certificate:\n%s", cert)
verbosef("new private key:\n%s", privateKey)
printf("certificate successfully rotated")
return nil
}
// createSecret is called by `token create-secret`
func (t *token) createSecret(printf shared.FormatFn) error {
var verbosef = shared.NoPrintf
if t.Verbose {
verbosef = printf
}
jwkSet := &jwk.Set{}
verbosef("retrieving existing certificates...")
var err error
if t.truncate > 1 { // if 1, just skip old stuff
// old jwks
jwksURL := fmt.Sprintf(certsURLFormat, t.RemoteServiceProxyURL)
jwkSet, err = jwk.FetchHTTP(jwksURL)
if err != nil {
return errors.Wrap(err, "fetching jwks")
}
jwksBytes, err := json.Marshal(jwkSet)
if err != nil {
return errors.Wrap(err, "marshalling JSON")
}
verbosef("old jkws...\n%s", string(jwksBytes))
}
t.keyID = time.Now().Format(time.RFC3339)
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return errors.Wrap(err, "generating key")
}
// jwks
key, err := jwk.New(&privateKey.PublicKey)
if err != nil {
return errors.Wrap(err, "generating jwks")
}
key.Set(jwk.KeyIDKey, t.keyID)
key.Set(jwk.AlgorithmKey, jwa.RS256.String())
jwkSet.Keys = append(jwkSet.Keys, key)
// sort increasing and truncate
sort.Sort(sort.Reverse(byKID(jwkSet.Keys)))
if t.truncate > 0 {
jwkSet.Keys = jwkSet.Keys[:t.truncate]
}
jwksBytes, err := json.Marshal(jwkSet)
if err != nil {
return errors.Wrap(err, "marshalling JSON")
}
verbosef("new jkws...\n%s", string(jwksBytes))
// private key
keyBytes := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
// kid
kidProp := fmt.Sprintf(kidSecretPropFormat, t.keyID)
// Secret CRD
data := map[string]string{
jwksSecretKey: base64.StdEncoding.EncodeToString(jwksBytes),
keySecretKey: base64.StdEncoding.EncodeToString(keyBytes),
kidSecretKey: base64.StdEncoding.EncodeToString([]byte(kidProp)),
}
crd := shared.KubernetesCRD{
APIVersion: "v1",
Kind: "Secret",
Type: "Opaque",
Metadata: shared.Metadata{
Name: fmt.Sprintf(policySecretNameFormat, t.Org, t.Env),
Namespace: t.namespace,
},
Data: data,
}
// encode as YAML
var yamlBuffer bytes.Buffer
yamlEncoder := yaml.NewEncoder(&yamlBuffer)
yamlEncoder.SetIndent(2)
err = yamlEncoder.Encode(crd)
if err != nil {
return errors.Wrap(err, "encoding YAML")
}
printf("# Secret for apigee-remote-service-envoy")
printf("# generated by apigee-remote-service-cli provision on %s", time.Now().Format("2006-01-02 15:04:05"))
printf(yamlBuffer.String())
return nil
}
type rotateRequest struct {
PrivateKey string `json:"private_key"`
Certificate string `json:"certificate"`
KeyID string `json:"kid"`
}
type tokenRequest struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
GrantType string `json:"grant_type"`
}
type tokenResponse struct {
Token string `json:"token"`
}
type byKID []jwk.Key
func (a byKID) Len() int { return len(a) }
func (a byKID) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKID) Less(i, j int) bool { return a[i].KeyID() < a[j].KeyID() } | "sort" | random_line_split |
seq2seq_trainer.py | import os
import sys
from argparse import ArgumentParser
import random
# # python.dataScience.notebookFileRoot=${fileDirname}
# wdir = os.path.abspath(os.getcwd() + "/../../")
# sys.path.append(wdir)
# print(sys.path)
# print(wdir)
import text_loaders as tl
import rnn_encoder_decoder as encdec
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger
#%%
class Seq2SeqCorrector(pl.LightningModule):
"""Encoder decoder pytorch module for trainning seq2seq model with teacher forcing
Module try to learn mapping from one sequence to antother. This implementation try to learn to reverse string of chars
"""
@staticmethod
def add_model_specific_args(parent_parser):
    """Extend ``parent_parser`` with this model's hyperparameter flags.

    Adds --emb_dim (int, 32), --hidden_dim (int, 64) and
    --dropout (float, 0.1); returns the new parser.
    """
    parser = ArgumentParser(parents=[parent_parser], add_help=False)
    for flag, arg_type, default in (
        ("--emb_dim", int, 32),
        ("--hidden_dim", int, 64),
        ("--dropout", float, 0.1),
    ):
        parser.add_argument(flag, type=arg_type, default=default)
    return parser
def __init__(
self,
vocab_size,
padding_index=0,
emb_dim=8,
hidden_dim=32,
dropout=0.1,
**kwargs,
) -> None:
"""Build the attention-based encoder/decoder seq2seq module.

Args:
    vocab_size: shared source/target vocabulary size (sets both the
        encoder input dim and the decoder output dim).
    padding_index: token id ignored by the loss and used to build
        attention masks.
    emb_dim: embedding size for both encoder and decoder.
    hidden_dim: hidden size for both encoder and decoder GRUs.
    dropout: dropout probability for encoder and decoder.
    **kwargs: must contain "max_epochs" (used by the LR scheduler);
        a missing key raises KeyError.
"""
super().__init__()
self.vocab_size = vocab_size
# dynamic, based on tokenizer vocab size defined in datamodule
self.input_dim = vocab_size
self.output_dim = vocab_size
self.enc_emb_dim = emb_dim # ENC_EMB_DIM
self.dec_emb_dim = emb_dim # DEC_EMB_DIM
self.enc_hid_dim = hidden_dim # ENC_HID_DIM
self.dec_hid_dim = hidden_dim # DEC_HID_DIM
self.enc_dropout = dropout # ENC_DROPOUT
self.dec_dropout = dropout # DEC_DROPOUT
self.pad_idx = padding_index
self.save_hyperparameters()
# NOTE(review): raises KeyError when "max_epochs" is not passed —
# required by configure_optimizers' OneCycleLR schedule.
self.max_epochs = kwargs["max_epochs"]
self.learning_rate = 0.0005
# self.input_src = torch.LongTensor(1).to(self.device)
# self.input_src_len = torch.LongTensor(1).to(self.device)
# self.input_trg = torch.LongTensor(1).to(self.device)
# todo: remove it this blocks loading state_dict from checkpoints
# Error(s) in loading state_dict for Seq2SeqCorrector:
# size mismatch for input_src: copying a param with shape
# torch.Size([201, 18]) from checkpoint,
# the shape in current model is torch.Size([1]).
# self.register_buffer("input_src", torch.LongTensor(1))
# self.register_buffer("input_src_len", torch.LongTensor(1))
# self.register_buffer("input_trg", torch.LongTensor(1))
# loss ignores padding positions in the target
self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
self.attention = encdec.Attention(self.enc_hid_dim, self.dec_hid_dim)
# INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT
self.encoder = encdec.Encoder(
self.input_dim,
self.enc_emb_dim,
self.enc_hid_dim,
self.dec_hid_dim,
self.enc_dropout,
)
self.decoder = encdec.Decoder(
self.output_dim, # OUTPUT_DIM,
self.dec_emb_dim, # DEC_EMB_DIM,
self.enc_hid_dim, # ENC_HID_DIM,
self.dec_hid_dim, # DEC_HID_DIM,
self.dec_dropout, # DEC_DROPOUT,
self.attention,
)
self._init_weights()
def _init_weights(self):
    """Initialize all parameters: weight tensors from a normal
    distribution, everything else (biases) to zero."""
    for name, param in self.named_parameters():
        if "weight" in name:
            # Reconstructed: this line was missing/corrupted in source.
            # N(0, 0.01) is the conventional init for this seq2seq
            # architecture — TODO confirm against the original repo.
            nn.init.normal_(param.data, mean=0, std=0.01)
        else:
            nn.init.constant_(param.data, 0)
def create_mask(self, src):
    """Return a boolean mask of non-padding positions.

    src is [src len, batch]; the result is permuted to
    [batch, src len], True where the token is not padding.
    """
    return (src != self.pad_idx).permute(1, 0)
def forward(self, src, src_len, trg, teacher_forcing_ratio=0.5):
"""Run encoder, then decode trg_len-1 steps with teacher forcing.

Returns decoder logits of shape [trg len, batch, vocab]; position 0
stays all-zeros (it corresponds to the <sos> input, never predicted).
"""
# src = [src len, batch size]
# src_len = [batch size]
# trg = [trg len, batch size]
# teacher_forcing_ratio is probability to use teacher forcing
# e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
batch_size = src.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
# tensor to store decoder outputs TODO: change to registered buffer in pyLightning
decoder_outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(
self.device
)
# encoder_outputs is all hidden states of the input sequence, back and forwards
# hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden = self.encoder(src, src_len)
mask = self.create_mask(src)
# mask = [batch size, src len]
# without sos token at the beginning and eos token at the end
# first input to the decoder is the <sos> tokens
input = trg[0, :]
# starting with input=<sos> (trg[0]) token and try to predict next token trg[1] so loop starts from 1 range(1, trg_len)
for t in range(1, trg_len):
# insert input token embedding, previous hidden state, all encoder hidden states
# and mask
# receive output tensor (predictions) and new hidden state
output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
# place predictions in a tensor holding predictions for each token
decoder_outputs[t] = output
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = output.argmax(1)
# if teacher forcing, use actual next token as next input
# if not, use predicted token
input = trg[t] if teacher_force else top1
return decoder_outputs
def loss(self, logits, target):
    """Cross-entropy of flattened vocab logits vs. target ids
    (padding targets are ignored via the criterion's ignore_index)."""
    return self._loss(logits, target)
def configure_optimizers(self):
"""Return AdamW plus a per-step OneCycleLR schedule.

The schedule length is derived from len(train_dataloader()) and
self.max_epochs, so the datamodule must be attached before this
runs. Earlier optimizer/scheduler experiments are kept below as
commented-out reference.
"""
# return optim.Adam(self.parameters(), lr=5e-4)
# optimizer = optim.Adam(self.parameters(), lr=1e-3)
# scheduler = optim.LambdaLR(optimizer, ...)
# return [optimizer], [scheduler]
# optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
# scheduler = optim.lr_scheduler.InverseSquareRootLR(optimizer, self.lr_warmup_steps)
# return (
# [optimizer],
# [
# {
# "scheduler": scheduler,
# "interval": "step",
# "frequency": 1,
# "reduce_on_plateau": False,
# "monitor": "val_loss",
# }
# ],
# )
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
lr_scheduler = {
"scheduler": optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.learning_rate,
steps_per_epoch=int(len(self.train_dataloader())),
epochs=self.max_epochs,
anneal_strategy="linear",
final_div_factor=1000,
pct_start=0.01,
),
"name": "learning_rate",
# step the schedule every optimizer step, not every epoch
"interval": "step",
"frequency": 1,
}
return [optimizer], [lr_scheduler]
def training_step(self, batch, batch_idx):
src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
# trg_lengths = trg_batch["trg_lengths"]
# resize input buffers, should speed up training and help
# with memory leaks https://discuss.pytorch.org/t/how-to-debug-causes-of-gpu-memory-leaks/6741
# self.input_src.resize_(src_seq.shape).copy_(src_seq)
# self.input_src_len.resize_(src_lengths.shape).copy_(src_lengths)
# self.input_trg.resize_(trg_seq.shape).copy_(trg_seq)
# just for testing lr scheduler
# output = torch.randn((*trg_seq.size(), self.output_dim), requires_grad=True, device=trg_seq.device)
# output = self.forward(self.input_src, self.input_src_len, self.input_trg)
# old version of forward, with tensors from dataloader
output = self.forward(src_seq, src_lengths, trg_seq)
# do not know if this is a problem, loss will be computed with sos token
# without sos token at the beginning and eos token at the end
output = output[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(output, trg)
self.log(
"train_loss",
loss.item(),
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return loss
def validation_step(self, batch, batch_idx):
"""validation is in eval mode so we do not have to use
placeholder input tensors
"""
src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
trg_lengths = trg_batch["trg_lengths"]
outputs = self.forward(src_seq, src_lengths, trg_seq, 0)
# # without sos token at the beginning and eos token at the end
logits = outputs[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(logits, trg)
# take without first sos token, and reduce by 2 dimension, take index of max logits (make prediction)
# seq_len * batch size * vocab_size -> seq_len * batch_size
pred_seq = outputs[1:].argmax(2)
# change layout: seq_len * batch_size -> batch_size * seq_len
pred_seq = pred_seq.T
# change layout: seq_len * batch_size -> batch_size * seq_len
trg_batch = trg_seq[1:].T
# compere list of predicted ids for all sequences in a batch to targets
acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))
# need to cast to list of predicted sequences (as list of token ids) [ [seq1_tok1, seq1_tok2, ...seq1_tokN],..., [seqK_tok1, seqK_tok2, ...seqK_tokZ]]
predicted_ids = pred_seq.tolist()
# need to add additional dim to each target reference sequence in order to
# convert to format needed by bleu_score function [ seq1=[ [reference1], [reference2] ], seq2=[ [reference1] ] ]
target_ids = torch.unsqueeze(trg_batch, 1).tolist()
# bleu score needs two arguments
# first: predicted_ids - list of predicted sequences as a list of predicted ids
# second: target_ids - list of references (can be many, list)
bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(
self.device
) # torch.unsqueeze(trg_batchT,1).tolist())
self.log(
"val_loss",
loss,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
"val_acc",
acc,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
"val_bleu_idx",
bleu_score,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
return loss, acc, bleu_score
if __name__ == "__main__":
# look to .vscode/launch.json file - there are set some args
parser = ArgumentParser()
# add PROGRAM level args
parser.add_argument("--N_samples", type=int, default=256 * 10)
parser.add_argument("--N_valid_size", type=int, default=32 * 10)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument(
"--dataset_path",
type=str,
default="./data/10k_sent_typos_wikipedia.jsonl",
)
# add model specific args
parser = Seq2SeqCorrector.add_model_specific_args(parser)
# add all the available trainer options to argparse
# ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
dm = tl.ABCSec2SeqDataModule(
batch_size=args.batch_size,
N_random_samples=args.N_samples,
N_valid_size=args.N_valid_size,
num_workers=args.num_workers,
)
# dm = tl.SeqPairJsonDataModule(
# path=args.dataset_path,
# batch_size=args.batch_size,
# n_samples=args.N_samples,
# n_valid_size=args.N_valid_size,
# num_workers=args.num_workers,
# )
dm.prepare_data()
dm.setup("fit")
# to see results run in console
# tensorboard --logdir tb_logs/
# then open browser http://localhost:6006/
log_desc = f"RNN with attention model vocab_size={dm.vocab_size} data_size={dm.dims}, emb_dim={args.emb_dim} hidden_dim={args.hidden_dim}"
print(log_desc)
logger = TensorBoardLogger(
"model_corrector", name="pl_tensorboard_logs", comment=log_desc
)
from pytorch_lightning.callbacks import LearningRateMonitor
lr_monitor = LearningRateMonitor(logging_interval="step")
trainer = pl.Trainer.from_argparse_args(
args, logger=logger, replace_sampler_ddp=False, callbacks=[lr_monitor]
) # , distributed_backend='ddp_cpu')
model_args = vars(args)
model = Seq2SeqCorrector(
vocab_size=dm.vocab_size, padding_index=dm.padding_index, **model_args
)
# most basic trainer, uses good defaults (1 gpu)
trainer.fit(model, dm)
# sample cmd
# python seq2seq_trainer.py --dataset_path /data/10k_sent_typos_wikipedia.jsonl \
# --gpus=2 --max_epoch=5 --batch_size=16 --num_workers=4 \
# --emb_dim=128 --hidden_dim=512 \
# --log_gpu_memory=True --weights_summary=full \
# --N_samples=1000000 --N_valid_size=10000 --distributed_backend=ddp --precision=16 --accumulate_grad_batches=4 --val_check_interval=640 --gradient_clip_val=2.0 --track_grad_norm=2
# tensorboard dev --logdir model_corrector/pl_tensorboard_logs/version??
| nn.init.normal_(param.data, mean=0, std=0.01) | conditional_block |
seq2seq_trainer.py | import os
import sys
from argparse import ArgumentParser
import random
# # python.dataScience.notebookFileRoot=${fileDirname}
# wdir = os.path.abspath(os.getcwd() + "/../../")
# sys.path.append(wdir)
# print(sys.path)
# print(wdir)
import text_loaders as tl
import rnn_encoder_decoder as encdec
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger
#%%
class Seq2SeqCorrector(pl.LightningModule):
"""Encoder decoder pytorch module for trainning seq2seq model with teacher forcing
Module try to learn mapping from one sequence to antother. This implementation try to learn to reverse string of chars
"""
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--emb_dim", type=int, default=32)
parser.add_argument("--hidden_dim", type=int, default=64)
parser.add_argument("--dropout", type=float, default=0.1)
return parser
def __init__(
self,
vocab_size,
padding_index=0,
emb_dim=8,
hidden_dim=32,
dropout=0.1,
**kwargs,
) -> None:
super().__init__()
self.vocab_size = vocab_size
# dynamic, based on tokenizer vocab size defined in datamodule
self.input_dim = vocab_size
self.output_dim = vocab_size
self.enc_emb_dim = emb_dim # ENC_EMB_DIM
self.dec_emb_dim = emb_dim # DEC_EMB_DIM
self.enc_hid_dim = hidden_dim # ENC_HID_DIM
self.dec_hid_dim = hidden_dim # DEC_HID_DIM
self.enc_dropout = dropout # ENC_DROPOUT
self.dec_dropout = dropout # DEC_DROPOUT
self.pad_idx = padding_index
self.save_hyperparameters()
self.max_epochs = kwargs["max_epochs"]
self.learning_rate = 0.0005
# self.input_src = torch.LongTensor(1).to(self.device)
# self.input_src_len = torch.LongTensor(1).to(self.device)
# self.input_trg = torch.LongTensor(1).to(self.device)
# todo: remove it this blocks loading state_dict from checkpoints
# Error(s) in loading state_dict for Seq2SeqCorrector:
# size mismatch for input_src: copying a param with shape
# torch.Size([201, 18]) from checkpoint,
# the shape in current model is torch.Size([1]).
# self.register_buffer("input_src", torch.LongTensor(1))
# self.register_buffer("input_src_len", torch.LongTensor(1))
# self.register_buffer("input_trg", torch.LongTensor(1))
self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
self.attention = encdec.Attention(self.enc_hid_dim, self.dec_hid_dim)
# INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT
self.encoder = encdec.Encoder(
self.input_dim,
self.enc_emb_dim,
self.enc_hid_dim,
self.dec_hid_dim,
self.enc_dropout,
)
self.decoder = encdec.Decoder(
self.output_dim, # OUTPUT_DIM,
self.dec_emb_dim, # DEC_EMB_DIM,
self.enc_hid_dim, # ENC_HID_DIM,
self.dec_hid_dim, # DEC_HID_DIM,
self.dec_dropout, # DEC_DROPOUT,
self.attention,
)
self._init_weights()
def _init_weights(self):
for name, param in self.named_parameters():
if "weight" in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
def create_mask(self, src):
mask = (src != self.pad_idx).permute(1, 0)
return mask
def forward(self, src, src_len, trg, teacher_forcing_ratio=0.5):
# src = [src len, batch size]
# src_len = [batch size]
# trg = [trg len, batch size]
# teacher_forcing_ratio is probability to use teacher forcing
# e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
batch_size = src.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
# tensor to store decoder outputs TODO: change to registered buffer in pyLightning
decoder_outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(
self.device
)
# encoder_outputs is all hidden states of the input sequence, back and forwards
# hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden = self.encoder(src, src_len)
mask = self.create_mask(src)
# mask = [batch size, src len]
# without sos token at the beginning and eos token at the end
# first input to the decoder is the <sos> tokens
input = trg[0, :]
# starting with input=<sos> (trg[0]) token and try to predict next token trg[1] so loop starts from 1 range(1, trg_len)
for t in range(1, trg_len):
# insert input token embedding, previous hidden state, all encoder hidden states
# and mask
# receive output tensor (predictions) and new hidden state
output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
# place predictions in a tensor holding predictions for each token
decoder_outputs[t] = output
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = output.argmax(1)
# if teacher forcing, use actual next token as next input
# if not, use predicted token
input = trg[t] if teacher_force else top1
return decoder_outputs
def | (self, logits, target):
return self._loss(logits, target)
def configure_optimizers(self):
# return optim.Adam(self.parameters(), lr=5e-4)
# optimizer = optim.Adam(self.parameters(), lr=1e-3)
# scheduler = optim.LambdaLR(optimizer, ...)
# return [optimizer], [scheduler]
# optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
# scheduler = optim.lr_scheduler.InverseSquareRootLR(optimizer, self.lr_warmup_steps)
# return (
# [optimizer],
# [
# {
# "scheduler": scheduler,
# "interval": "step",
# "frequency": 1,
# "reduce_on_plateau": False,
# "monitor": "val_loss",
# }
# ],
# )
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
lr_scheduler = {
"scheduler": optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.learning_rate,
steps_per_epoch=int(len(self.train_dataloader())),
epochs=self.max_epochs,
anneal_strategy="linear",
final_div_factor=1000,
pct_start=0.01,
),
"name": "learning_rate",
"interval": "step",
"frequency": 1,
}
return [optimizer], [lr_scheduler]
def training_step(self, batch, batch_idx):
src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
# trg_lengths = trg_batch["trg_lengths"]
# resize input buffers, should speed up training and help
# with memory leaks https://discuss.pytorch.org/t/how-to-debug-causes-of-gpu-memory-leaks/6741
# self.input_src.resize_(src_seq.shape).copy_(src_seq)
# self.input_src_len.resize_(src_lengths.shape).copy_(src_lengths)
# self.input_trg.resize_(trg_seq.shape).copy_(trg_seq)
# just for testing lr scheduler
# output = torch.randn((*trg_seq.size(), self.output_dim), requires_grad=True, device=trg_seq.device)
# output = self.forward(self.input_src, self.input_src_len, self.input_trg)
# old version of forward, with tensors from dataloader
output = self.forward(src_seq, src_lengths, trg_seq)
# do not know if this is a problem, loss will be computed with sos token
# without sos token at the beginning and eos token at the end
output = output[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(output, trg)
self.log(
"train_loss",
loss.item(),
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return loss
def validation_step(self, batch, batch_idx):
"""validation is in eval mode so we do not have to use
placeholder input tensors
"""
src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
trg_lengths = trg_batch["trg_lengths"]
outputs = self.forward(src_seq, src_lengths, trg_seq, 0)
# # without sos token at the beginning and eos token at the end
logits = outputs[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(logits, trg)
# take without first sos token, and reduce by 2 dimension, take index of max logits (make prediction)
# seq_len * batch size * vocab_size -> seq_len * batch_size
pred_seq = outputs[1:].argmax(2)
# change layout: seq_len * batch_size -> batch_size * seq_len
pred_seq = pred_seq.T
# change layout: seq_len * batch_size -> batch_size * seq_len
trg_batch = trg_seq[1:].T
# compere list of predicted ids for all sequences in a batch to targets
acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))
# need to cast to list of predicted sequences (as list of token ids) [ [seq1_tok1, seq1_tok2, ...seq1_tokN],..., [seqK_tok1, seqK_tok2, ...seqK_tokZ]]
predicted_ids = pred_seq.tolist()
# need to add additional dim to each target reference sequence in order to
# convert to format needed by bleu_score function [ seq1=[ [reference1], [reference2] ], seq2=[ [reference1] ] ]
target_ids = torch.unsqueeze(trg_batch, 1).tolist()
# bleu score needs two arguments
# first: predicted_ids - list of predicted sequences as a list of predicted ids
# second: target_ids - list of references (can be many, list)
bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(
self.device
) # torch.unsqueeze(trg_batchT,1).tolist())
self.log(
"val_loss",
loss,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
"val_acc",
acc,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
"val_bleu_idx",
bleu_score,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
return loss, acc, bleu_score
if __name__ == "__main__":
# look to .vscode/launch.json file - there are set some args
parser = ArgumentParser()
# add PROGRAM level args
parser.add_argument("--N_samples", type=int, default=256 * 10)
parser.add_argument("--N_valid_size", type=int, default=32 * 10)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument(
"--dataset_path",
type=str,
default="./data/10k_sent_typos_wikipedia.jsonl",
)
# add model specific args
parser = Seq2SeqCorrector.add_model_specific_args(parser)
# add all the available trainer options to argparse
# ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
dm = tl.ABCSec2SeqDataModule(
batch_size=args.batch_size,
N_random_samples=args.N_samples,
N_valid_size=args.N_valid_size,
num_workers=args.num_workers,
)
# dm = tl.SeqPairJsonDataModule(
# path=args.dataset_path,
# batch_size=args.batch_size,
# n_samples=args.N_samples,
# n_valid_size=args.N_valid_size,
# num_workers=args.num_workers,
# )
dm.prepare_data()
dm.setup("fit")
# to see results run in console
# tensorboard --logdir tb_logs/
# then open browser http://localhost:6006/
log_desc = f"RNN with attention model vocab_size={dm.vocab_size} data_size={dm.dims}, emb_dim={args.emb_dim} hidden_dim={args.hidden_dim}"
print(log_desc)
logger = TensorBoardLogger(
"model_corrector", name="pl_tensorboard_logs", comment=log_desc
)
from pytorch_lightning.callbacks import LearningRateMonitor
lr_monitor = LearningRateMonitor(logging_interval="step")
trainer = pl.Trainer.from_argparse_args(
args, logger=logger, replace_sampler_ddp=False, callbacks=[lr_monitor]
) # , distributed_backend='ddp_cpu')
model_args = vars(args)
model = Seq2SeqCorrector(
vocab_size=dm.vocab_size, padding_index=dm.padding_index, **model_args
)
# most basic trainer, uses good defaults (1 gpu)
trainer.fit(model, dm)
# sample cmd
# python seq2seq_trainer.py --dataset_path /data/10k_sent_typos_wikipedia.jsonl \
# --gpus=2 --max_epoch=5 --batch_size=16 --num_workers=4 \
# --emb_dim=128 --hidden_dim=512 \
# --log_gpu_memory=True --weights_summary=full \
# --N_samples=1000000 --N_valid_size=10000 --distributed_backend=ddp --precision=16 --accumulate_grad_batches=4 --val_check_interval=640 --gradient_clip_val=2.0 --track_grad_norm=2
# tensorboard dev --logdir model_corrector/pl_tensorboard_logs/version??
| loss | identifier_name |
seq2seq_trainer.py | import os
import sys
from argparse import ArgumentParser
import random
# # python.dataScience.notebookFileRoot=${fileDirname}
# wdir = os.path.abspath(os.getcwd() + "/../../")
# sys.path.append(wdir)
# print(sys.path)
# print(wdir)
import text_loaders as tl
import rnn_encoder_decoder as encdec
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger
#%%
class Seq2SeqCorrector(pl.LightningModule):
"""Encoder decoder pytorch module for trainning seq2seq model with teacher forcing
Module try to learn mapping from one sequence to antother. This implementation try to learn to reverse string of chars
"""
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--emb_dim", type=int, default=32)
parser.add_argument("--hidden_dim", type=int, default=64)
parser.add_argument("--dropout", type=float, default=0.1)
return parser
def __init__(
self,
vocab_size,
padding_index=0,
emb_dim=8,
hidden_dim=32,
dropout=0.1,
**kwargs,
) -> None:
super().__init__()
self.vocab_size = vocab_size
# dynamic, based on tokenizer vocab size defined in datamodule
self.input_dim = vocab_size
self.output_dim = vocab_size
self.enc_emb_dim = emb_dim # ENC_EMB_DIM
self.dec_emb_dim = emb_dim # DEC_EMB_DIM
self.enc_hid_dim = hidden_dim # ENC_HID_DIM
self.dec_hid_dim = hidden_dim # DEC_HID_DIM
self.enc_dropout = dropout # ENC_DROPOUT
self.dec_dropout = dropout # DEC_DROPOUT
self.pad_idx = padding_index
self.save_hyperparameters()
self.max_epochs = kwargs["max_epochs"]
self.learning_rate = 0.0005
# self.input_src = torch.LongTensor(1).to(self.device)
# self.input_src_len = torch.LongTensor(1).to(self.device)
# self.input_trg = torch.LongTensor(1).to(self.device)
# todo: remove it this blocks loading state_dict from checkpoints
# Error(s) in loading state_dict for Seq2SeqCorrector:
# size mismatch for input_src: copying a param with shape
# torch.Size([201, 18]) from checkpoint,
# the shape in current model is torch.Size([1]).
# self.register_buffer("input_src", torch.LongTensor(1))
# self.register_buffer("input_src_len", torch.LongTensor(1))
# self.register_buffer("input_trg", torch.LongTensor(1))
self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
self.attention = encdec.Attention(self.enc_hid_dim, self.dec_hid_dim)
# INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT
self.encoder = encdec.Encoder(
self.input_dim,
self.enc_emb_dim,
self.enc_hid_dim,
self.dec_hid_dim,
self.enc_dropout,
)
self.decoder = encdec.Decoder(
self.output_dim, # OUTPUT_DIM,
self.dec_emb_dim, # DEC_EMB_DIM,
self.enc_hid_dim, # ENC_HID_DIM,
self.dec_hid_dim, # DEC_HID_DIM,
self.dec_dropout, # DEC_DROPOUT,
self.attention,
)
self._init_weights()
def _init_weights(self):
for name, param in self.named_parameters():
if "weight" in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
def create_mask(self, src):
mask = (src != self.pad_idx).permute(1, 0)
return mask
def forward(self, src, src_len, trg, teacher_forcing_ratio=0.5):
# src = [src len, batch size]
# src_len = [batch size]
# trg = [trg len, batch size]
# teacher_forcing_ratio is probability to use teacher forcing
# e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
batch_size = src.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
# tensor to store decoder outputs TODO: change to registered buffer in pyLightning
decoder_outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(
self.device
)
# encoder_outputs is all hidden states of the input sequence, back and forwards
# hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden = self.encoder(src, src_len)
mask = self.create_mask(src)
# mask = [batch size, src len]
# without sos token at the beginning and eos token at the end
# first input to the decoder is the <sos> tokens
input = trg[0, :]
# starting with input=<sos> (trg[0]) token and try to predict next token trg[1] so loop starts from 1 range(1, trg_len)
for t in range(1, trg_len):
# insert input token embedding, previous hidden state, all encoder hidden states
# and mask
# receive output tensor (predictions) and new hidden state
output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
# place predictions in a tensor holding predictions for each token
decoder_outputs[t] = output
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = output.argmax(1)
# if teacher forcing, use actual next token as next input
# if not, use predicted token
input = trg[t] if teacher_force else top1
return decoder_outputs
def loss(self, logits, target):
return self._loss(logits, target)
def configure_optimizers(self):
# return optim.Adam(self.parameters(), lr=5e-4)
# optimizer = optim.Adam(self.parameters(), lr=1e-3)
# scheduler = optim.LambdaLR(optimizer, ...)
# return [optimizer], [scheduler]
# optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
# scheduler = optim.lr_scheduler.InverseSquareRootLR(optimizer, self.lr_warmup_steps)
# return (
# [optimizer],
# [
# {
# "scheduler": scheduler,
# "interval": "step",
# "frequency": 1,
# "reduce_on_plateau": False,
# "monitor": "val_loss",
# }
# ],
# )
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
lr_scheduler = {
"scheduler": optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.learning_rate,
steps_per_epoch=int(len(self.train_dataloader())),
epochs=self.max_epochs,
anneal_strategy="linear",
final_div_factor=1000,
pct_start=0.01,
),
"name": "learning_rate",
"interval": "step",
"frequency": 1,
}
return [optimizer], [lr_scheduler]
def training_step(self, batch, batch_idx):
src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
# trg_lengths = trg_batch["trg_lengths"]
# resize input buffers, should speed up training and help
# with memory leaks https://discuss.pytorch.org/t/how-to-debug-causes-of-gpu-memory-leaks/6741
# self.input_src.resize_(src_seq.shape).copy_(src_seq)
# self.input_src_len.resize_(src_lengths.shape).copy_(src_lengths)
# self.input_trg.resize_(trg_seq.shape).copy_(trg_seq)
# just for testing lr scheduler
# output = torch.randn((*trg_seq.size(), self.output_dim), requires_grad=True, device=trg_seq.device)
# output = self.forward(self.input_src, self.input_src_len, self.input_trg)
# old version of forward, with tensors from dataloader
output = self.forward(src_seq, src_lengths, trg_seq)
# do not know if this is a problem, loss will be computed with sos token
# without sos token at the beginning and eos token at the end
output = output[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(output, trg)
self.log(
"train_loss",
loss.item(),
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return loss
def validation_step(self, batch, batch_idx):
"""validation is in eval mode so we do not have to use
placeholder input tensors
"""
src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
trg_lengths = trg_batch["trg_lengths"]
outputs = self.forward(src_seq, src_lengths, trg_seq, 0)
# # without sos token at the beginning and eos token at the end
logits = outputs[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(logits, trg)
# take without first sos token, and reduce by 2 dimension, take index of max logits (make prediction)
# seq_len * batch size * vocab_size -> seq_len * batch_size
pred_seq = outputs[1:].argmax(2)
# change layout: seq_len * batch_size -> batch_size * seq_len
pred_seq = pred_seq.T
# change layout: seq_len * batch_size -> batch_size * seq_len
trg_batch = trg_seq[1:].T
# compere list of predicted ids for all sequences in a batch to targets
acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))
# need to cast to list of predicted sequences (as list of token ids) [ [seq1_tok1, seq1_tok2, ...seq1_tokN],..., [seqK_tok1, seqK_tok2, ...seqK_tokZ]]
predicted_ids = pred_seq.tolist()
# need to add additional dim to each target reference sequence in order to
# convert to format needed by bleu_score function [ seq1=[ [reference1], [reference2] ], seq2=[ [reference1] ] ]
target_ids = torch.unsqueeze(trg_batch, 1).tolist()
# bleu score needs two arguments
# first: predicted_ids - list of predicted sequences as a list of predicted ids
# second: target_ids - list of references (can be many, list)
bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(
self.device
) # torch.unsqueeze(trg_batchT,1).tolist())
self.log(
"val_loss",
loss,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True, | self.log(
"val_acc",
acc,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
"val_bleu_idx",
bleu_score,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
return loss, acc, bleu_score
if __name__ == "__main__":
# look to .vscode/launch.json file - there are set some args
parser = ArgumentParser()
# add PROGRAM level args
parser.add_argument("--N_samples", type=int, default=256 * 10)
parser.add_argument("--N_valid_size", type=int, default=32 * 10)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument(
"--dataset_path",
type=str,
default="./data/10k_sent_typos_wikipedia.jsonl",
)
# add model specific args
parser = Seq2SeqCorrector.add_model_specific_args(parser)
# add all the available trainer options to argparse
# ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
dm = tl.ABCSec2SeqDataModule(
batch_size=args.batch_size,
N_random_samples=args.N_samples,
N_valid_size=args.N_valid_size,
num_workers=args.num_workers,
)
# dm = tl.SeqPairJsonDataModule(
# path=args.dataset_path,
# batch_size=args.batch_size,
# n_samples=args.N_samples,
# n_valid_size=args.N_valid_size,
# num_workers=args.num_workers,
# )
dm.prepare_data()
dm.setup("fit")
# to see results run in console
# tensorboard --logdir tb_logs/
# then open browser http://localhost:6006/
log_desc = f"RNN with attention model vocab_size={dm.vocab_size} data_size={dm.dims}, emb_dim={args.emb_dim} hidden_dim={args.hidden_dim}"
print(log_desc)
logger = TensorBoardLogger(
"model_corrector", name="pl_tensorboard_logs", comment=log_desc
)
from pytorch_lightning.callbacks import LearningRateMonitor
lr_monitor = LearningRateMonitor(logging_interval="step")
trainer = pl.Trainer.from_argparse_args(
args, logger=logger, replace_sampler_ddp=False, callbacks=[lr_monitor]
) # , distributed_backend='ddp_cpu')
model_args = vars(args)
model = Seq2SeqCorrector(
vocab_size=dm.vocab_size, padding_index=dm.padding_index, **model_args
)
# most basic trainer, uses good defaults (1 gpu)
trainer.fit(model, dm)
# sample cmd
# python seq2seq_trainer.py --dataset_path /data/10k_sent_typos_wikipedia.jsonl \
# --gpus=2 --max_epoch=5 --batch_size=16 --num_workers=4 \
# --emb_dim=128 --hidden_dim=512 \
# --log_gpu_memory=True --weights_summary=full \
# --N_samples=1000000 --N_valid_size=10000 --distributed_backend=ddp --precision=16 --accumulate_grad_batches=4 --val_check_interval=640 --gradient_clip_val=2.0 --track_grad_norm=2
# tensorboard dev --logdir model_corrector/pl_tensorboard_logs/version?? | sync_dist=True,
) | random_line_split |
seq2seq_trainer.py | import os
import sys
from argparse import ArgumentParser
import random
# # python.dataScience.notebookFileRoot=${fileDirname}
# wdir = os.path.abspath(os.getcwd() + "/../../")
# sys.path.append(wdir)
# print(sys.path)
# print(wdir)
import text_loaders as tl
import rnn_encoder_decoder as encdec
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger
#%%
class Seq2SeqCorrector(pl.LightningModule):
"""Encoder decoder pytorch module for trainning seq2seq model with teacher forcing
Module try to learn mapping from one sequence to antother. This implementation try to learn to reverse string of chars
"""
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--emb_dim", type=int, default=32)
parser.add_argument("--hidden_dim", type=int, default=64)
parser.add_argument("--dropout", type=float, default=0.1)
return parser
def __init__(
self,
vocab_size,
padding_index=0,
emb_dim=8,
hidden_dim=32,
dropout=0.1,
**kwargs,
) -> None:
super().__init__()
self.vocab_size = vocab_size
# dynamic, based on tokenizer vocab size defined in datamodule
self.input_dim = vocab_size
self.output_dim = vocab_size
self.enc_emb_dim = emb_dim # ENC_EMB_DIM
self.dec_emb_dim = emb_dim # DEC_EMB_DIM
self.enc_hid_dim = hidden_dim # ENC_HID_DIM
self.dec_hid_dim = hidden_dim # DEC_HID_DIM
self.enc_dropout = dropout # ENC_DROPOUT
self.dec_dropout = dropout # DEC_DROPOUT
self.pad_idx = padding_index
self.save_hyperparameters()
self.max_epochs = kwargs["max_epochs"]
self.learning_rate = 0.0005
# self.input_src = torch.LongTensor(1).to(self.device)
# self.input_src_len = torch.LongTensor(1).to(self.device)
# self.input_trg = torch.LongTensor(1).to(self.device)
# todo: remove it this blocks loading state_dict from checkpoints
# Error(s) in loading state_dict for Seq2SeqCorrector:
# size mismatch for input_src: copying a param with shape
# torch.Size([201, 18]) from checkpoint,
# the shape in current model is torch.Size([1]).
# self.register_buffer("input_src", torch.LongTensor(1))
# self.register_buffer("input_src_len", torch.LongTensor(1))
# self.register_buffer("input_trg", torch.LongTensor(1))
self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
self.attention = encdec.Attention(self.enc_hid_dim, self.dec_hid_dim)
# INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT
self.encoder = encdec.Encoder(
self.input_dim,
self.enc_emb_dim,
self.enc_hid_dim,
self.dec_hid_dim,
self.enc_dropout,
)
self.decoder = encdec.Decoder(
self.output_dim, # OUTPUT_DIM,
self.dec_emb_dim, # DEC_EMB_DIM,
self.enc_hid_dim, # ENC_HID_DIM,
self.dec_hid_dim, # DEC_HID_DIM,
self.dec_dropout, # DEC_DROPOUT,
self.attention,
)
self._init_weights()
def _init_weights(self):
for name, param in self.named_parameters():
if "weight" in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
def create_mask(self, src):
mask = (src != self.pad_idx).permute(1, 0)
return mask
def forward(self, src, src_len, trg, teacher_forcing_ratio=0.5):
# src = [src len, batch size]
# src_len = [batch size]
# trg = [trg len, batch size]
# teacher_forcing_ratio is probability to use teacher forcing
# e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
batch_size = src.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
# tensor to store decoder outputs TODO: change to registered buffer in pyLightning
decoder_outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(
self.device
)
# encoder_outputs is all hidden states of the input sequence, back and forwards
# hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden = self.encoder(src, src_len)
mask = self.create_mask(src)
# mask = [batch size, src len]
# without sos token at the beginning and eos token at the end
# first input to the decoder is the <sos> tokens
input = trg[0, :]
# starting with input=<sos> (trg[0]) token and try to predict next token trg[1] so loop starts from 1 range(1, trg_len)
for t in range(1, trg_len):
# insert input token embedding, previous hidden state, all encoder hidden states
# and mask
# receive output tensor (predictions) and new hidden state
output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
# place predictions in a tensor holding predictions for each token
decoder_outputs[t] = output
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = output.argmax(1)
# if teacher forcing, use actual next token as next input
# if not, use predicted token
input = trg[t] if teacher_force else top1
return decoder_outputs
def loss(self, logits, target):
return self._loss(logits, target)
def configure_optimizers(self):
# return optim.Adam(self.parameters(), lr=5e-4)
# optimizer = optim.Adam(self.parameters(), lr=1e-3)
# scheduler = optim.LambdaLR(optimizer, ...)
# return [optimizer], [scheduler]
# optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
# scheduler = optim.lr_scheduler.InverseSquareRootLR(optimizer, self.lr_warmup_steps)
# return (
# [optimizer],
# [
# {
# "scheduler": scheduler,
# "interval": "step",
# "frequency": 1,
# "reduce_on_plateau": False,
# "monitor": "val_loss",
# }
# ],
# )
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
lr_scheduler = {
"scheduler": optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.learning_rate,
steps_per_epoch=int(len(self.train_dataloader())),
epochs=self.max_epochs,
anneal_strategy="linear",
final_div_factor=1000,
pct_start=0.01,
),
"name": "learning_rate",
"interval": "step",
"frequency": 1,
}
return [optimizer], [lr_scheduler]
def training_step(self, batch, batch_idx):
|
def validation_step(self, batch, batch_idx):
"""validation is in eval mode so we do not have to use
placeholder input tensors
"""
src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
trg_lengths = trg_batch["trg_lengths"]
outputs = self.forward(src_seq, src_lengths, trg_seq, 0)
# # without sos token at the beginning and eos token at the end
logits = outputs[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(logits, trg)
# take without first sos token, and reduce by 2 dimension, take index of max logits (make prediction)
# seq_len * batch size * vocab_size -> seq_len * batch_size
pred_seq = outputs[1:].argmax(2)
# change layout: seq_len * batch_size -> batch_size * seq_len
pred_seq = pred_seq.T
# change layout: seq_len * batch_size -> batch_size * seq_len
trg_batch = trg_seq[1:].T
# compere list of predicted ids for all sequences in a batch to targets
acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))
# need to cast to list of predicted sequences (as list of token ids) [ [seq1_tok1, seq1_tok2, ...seq1_tokN],..., [seqK_tok1, seqK_tok2, ...seqK_tokZ]]
predicted_ids = pred_seq.tolist()
# need to add additional dim to each target reference sequence in order to
# convert to format needed by bleu_score function [ seq1=[ [reference1], [reference2] ], seq2=[ [reference1] ] ]
target_ids = torch.unsqueeze(trg_batch, 1).tolist()
# bleu score needs two arguments
# first: predicted_ids - list of predicted sequences as a list of predicted ids
# second: target_ids - list of references (can be many, list)
bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(
self.device
) # torch.unsqueeze(trg_batchT,1).tolist())
self.log(
"val_loss",
loss,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
"val_acc",
acc,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
self.log(
"val_bleu_idx",
bleu_score,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True,
)
return loss, acc, bleu_score
if __name__ == "__main__":
# look to .vscode/launch.json file - there are set some args
parser = ArgumentParser()
# add PROGRAM level args
parser.add_argument("--N_samples", type=int, default=256 * 10)
parser.add_argument("--N_valid_size", type=int, default=32 * 10)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument(
"--dataset_path",
type=str,
default="./data/10k_sent_typos_wikipedia.jsonl",
)
# add model specific args
parser = Seq2SeqCorrector.add_model_specific_args(parser)
# add all the available trainer options to argparse
# ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
dm = tl.ABCSec2SeqDataModule(
batch_size=args.batch_size,
N_random_samples=args.N_samples,
N_valid_size=args.N_valid_size,
num_workers=args.num_workers,
)
# dm = tl.SeqPairJsonDataModule(
# path=args.dataset_path,
# batch_size=args.batch_size,
# n_samples=args.N_samples,
# n_valid_size=args.N_valid_size,
# num_workers=args.num_workers,
# )
dm.prepare_data()
dm.setup("fit")
# to see results run in console
# tensorboard --logdir tb_logs/
# then open browser http://localhost:6006/
log_desc = f"RNN with attention model vocab_size={dm.vocab_size} data_size={dm.dims}, emb_dim={args.emb_dim} hidden_dim={args.hidden_dim}"
print(log_desc)
logger = TensorBoardLogger(
"model_corrector", name="pl_tensorboard_logs", comment=log_desc
)
from pytorch_lightning.callbacks import LearningRateMonitor
lr_monitor = LearningRateMonitor(logging_interval="step")
trainer = pl.Trainer.from_argparse_args(
args, logger=logger, replace_sampler_ddp=False, callbacks=[lr_monitor]
) # , distributed_backend='ddp_cpu')
model_args = vars(args)
model = Seq2SeqCorrector(
vocab_size=dm.vocab_size, padding_index=dm.padding_index, **model_args
)
# most basic trainer, uses good defaults (1 gpu)
trainer.fit(model, dm)
# sample cmd
# python seq2seq_trainer.py --dataset_path /data/10k_sent_typos_wikipedia.jsonl \
# --gpus=2 --max_epoch=5 --batch_size=16 --num_workers=4 \
# --emb_dim=128 --hidden_dim=512 \
# --log_gpu_memory=True --weights_summary=full \
# --N_samples=1000000 --N_valid_size=10000 --distributed_backend=ddp --precision=16 --accumulate_grad_batches=4 --val_check_interval=640 --gradient_clip_val=2.0 --track_grad_norm=2
# tensorboard dev --logdir model_corrector/pl_tensorboard_logs/version??
| src_batch, trg_batch = batch
src_seq = src_batch["src_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
src_seq = src_seq.transpose(0, 1)
src_lengths = src_batch["src_lengths"]
trg_seq = trg_batch["trg_ids"]
# change from [batch, seq_len] -> to [seq_len, batch]
trg_seq = trg_seq.transpose(0, 1)
# trg_lengths = trg_batch["trg_lengths"]
# resize input buffers, should speed up training and help
# with memory leaks https://discuss.pytorch.org/t/how-to-debug-causes-of-gpu-memory-leaks/6741
# self.input_src.resize_(src_seq.shape).copy_(src_seq)
# self.input_src_len.resize_(src_lengths.shape).copy_(src_lengths)
# self.input_trg.resize_(trg_seq.shape).copy_(trg_seq)
# just for testing lr scheduler
# output = torch.randn((*trg_seq.size(), self.output_dim), requires_grad=True, device=trg_seq.device)
# output = self.forward(self.input_src, self.input_src_len, self.input_trg)
# old version of forward, with tensors from dataloader
output = self.forward(src_seq, src_lengths, trg_seq)
# do not know if this is a problem, loss will be computed with sos token
# without sos token at the beginning and eos token at the end
output = output[1:].view(-1, self.output_dim)
# trg = trg_seq[1:].view(-1)
trg = trg_seq[1:].reshape(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = self.loss(output, trg)
self.log(
"train_loss",
loss.item(),
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return loss | identifier_body |
PropDetCode.py | # -*- coding: utf-8 -*-
import pickle
import pathlib
from pathlib import Path
from typing import List, Tuple, Dict
import numpy as np
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.utils.data import Dataset, DataLoader
from torchtext.data import get_tokenizer
from matplotlib import pyplot as plt
"""### **Preprocesare**"""
def read_data(directory):
ids = []
texts = []
labels = []
for f in directory.glob('*.txt'):
id = f.name.replace('article', '').replace('.txt', '')
ids.append(id)
texts.append(f.read_text('utf8'))
labels.append(parse_label(f.as_posix().replace('.txt', '.labels.tsv')))
# labels can be empty
return ids, texts, labels
def parse_label(label_path):
labels = []
f = Path(label_path)
if not f.exists():
return labels
for line in open(label_path):
parts = line.strip().split('\t')
labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])
labels = sorted(labels)
if labels:
length = max([label[1] for label in labels])
visit = np.zeros(length)
res = []
for label in labels:
if sum(visit[label[0]:label[1]]):
label[3] = 1
else:
visit[label[0]:label[1]] = 1
res.append(label)
return res
else:
return labels
def clean_text(articles, ids):
texts = []
for article, id in zip(articles, ids):
sentences = article.split('\n')
end = -1
res = []
for sentence in sentences:
start = end + 1
end = start + len(sentence) # length of sequence
if sentence != "": # if not empty line
res.append([id, sentence, start, end])
texts.append(res)
return texts
def make_dataset(texts, lbls):
txt = []
lbl = []
for text, label in zip(texts, lbls):
for Text in text:
txt.append(Text[1])
k = 0
for l in label:
if Text[2] < l[0] < Text[3]:
lbl.append(1)
k = 1
break
elif Text[2] < l[1] < Text[3]:
lbl.append(1)
k = 1
break
if k == 0:
lbl.append(0)
return txt, lbl
directory = pathlib.Path('data/protechn_corpus_eval/train')
ids, texts,lbl = read_data(directory)
ids_train = ids
texts_train = texts
lbl_train = lbl
directory = pathlib.Path('data/protechn_corpus_eval/test')
ids_test, texts_test,lbl_test = read_data(directory)
directory = pathlib.Path('data/protechn_corpus_eval/dev')
ids_dev, texts_dev,lbl_dev = read_data(directory)
txt_train = clean_text(texts_train, ids_train)
txt_test = clean_text(texts_test, ids_test)
txt_dev =clean_text(texts_dev, ids_dev)
train_txt, train_lbl = make_dataset(txt_train, lbl_train)
test_txt, test_lbl = make_dataset(txt_test, lbl_test)
dev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)
pickle.dump([dev_txt,dev_lbl], open("savedata/dev.txt", "wb"))
pickle.dump([test_txt,test_lbl], open("savedata/test.txt", "wb"))
pickle.dump([train_txt,train_lbl], open("savedata/train.txt", "wb"))
train_txt, train_lbl = pickle.load(open("savedata/train.txt", "rb"))
test_txt, test_lbl = pickle.load(open("savedata/test.txt", "rb"))
dev_txt, dev_lbl = pickle.load(open("savedata/dev.txt", "rb"))
"""### **Dataset+ data_loader**"""
class Vocabulary:
"""
Helper class that maps words to unique indices and the other way around
"""
def __init__(self, tokens: List[str]):
# dictionary that maps words to indices
self.word_to_idx = {'<PAD>': 0}
for idx, tok in enumerate(tokens, 1):
self.word_to_idx[tok] = idx
# dictionary that maps indices to words
self.idx_to_word = {}
for tok, idx in self.word_to_idx.items():
self.idx_to_word[idx] = tok
def get_token_at_index(self, idx: int):
return self.idx_to_word[idx]
def get_index_of_token(self, token: str):
return self.word_to_idx[token]
def size(self):
return len(self.word_to_idx)
class PropagandaDataset(Dataset):
def __init__(self,
fold: str,
examples: List[str],
labels: List[int],
vocab: Vocabulary):
"""
:type vocab: object
:param fold: 'train'/'eval'/'test'
:param examples: List of sentences/paragraphs
:param labels: List of labels (1 if propaganda, 0 otherwise)
"""
self.fold = fold
self.examples = examples
self.labels = labels
self.vocab = vocab
def __getitem__(self, index: int) -> (torch.Tensor, torch.Tensor):
"""
This function converts an example to a Tensor containing the indices
:param index: position of example to be retrieved.
"""
# retrieve sentence and label (correct class index)
example, label = self.examples[index], self.labels[index]
# tokenize sentence into words and other symbols
tokenizer = get_tokenizer("spacy")
tokens = tokenizer(example)
# convert tokens to their corresponding indices, according to
# vocabulary
token_indices = []
for i in tokens:
token_indices.append(self.vocab.get_index_of_token(i))
return torch.LongTensor(token_indices), torch.LongTensor(label)
def __len__(self):
"""
Return the size of this dataset. This is given by the number
of sentences.
"""
return len(self.examples)
def collate_sentences(batch: List[Tuple]):
"""
This function converts a list of batch_size examples to
a Tensor of size batch_size x max_len
batch: [(example_1_tensor, example_1_label),
...
(example_batch_size_tensor, example_batch_size_label)]
"""
# fill this list with all the labels in the batch
batch_labels = []
# we need to find the maximum length of a sentence in this batch
max_len = 0
for i in batch:
if len(i[0]) > max_len:
max_len = len(i[0])
batch_size = len(batch)
# print('batch size',batch_size)
# initialize a Tensor filled with zeros (aka index of <PAD>)
batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)
# fill each row idx in batch_sentences with the corresponding
# sequence tensor
#
# ... batch_sentences[idx, ...] = ...
for idx in range(0, batch_size):
# print(idx)
# print(len(batch[idx][0]))
# print(len(batch_sentences[idx]))
batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]
print(batch[idx])
batch_labels.append(batch[idx][1])
# print(batch_sentences[idx])
print(type(batch_labels))
# batch_labels = [torch.LongTensor(x) for x in batch_labels]
batch_labels = torch.tensor(batch_labels)
# print(batch_labels)
return batch_sentences, batch_labels
def | (txt: List[Tuple]):
tokenizer = get_tokenizer("spacy")
list_v = []
for i in txt:
tok = tokenizer(i)
for j in tok:
if list_v.count(j) == 0:
list_v.append(j)
vocab = Vocabulary(tokens=list_v)
return vocab
full_text = train_txt + dev_txt
vocab = fill_vocab(full_text)
test_vocab = fill_vocab(test_txt)
train_vocab = fill_vocab(train_txt)
dev_vocab = fill_vocab(dev_txt)
pickle.dump(dev_vocab, open("savedata/dev_vocab.txt", "wb"))
pickle.dump(test_vocab, open("savedata/test_vocab.txt", "wb"))
pickle.dump(train_vocab, open("savedata/train_vocab.txt", "wb"))
pickle.dump(vocab, open("savedata/vocab.txt", "wb"))
dev_vocab = pickle.load(open("savedata/dev_vocab.txt","rb"))
test_vocab = pickle.load(open("savedata/test_vocab.txt","rb"))
train_vocab = pickle.load(open("savedata/train_vocab.txt","rb"))
vocab = pickle.load(open("savedata/vocab.txt", "rb"))
dataset_train = PropagandaDataset('train', train_txt, train_lbl, train_vocab)
train_loader = DataLoader(dataset_train, batch_size=16, collate_fn=collate_sentences)
dataset_test = PropagandaDataset('train', test_txt, test_lbl, test_vocab)
test_loader = DataLoader(dataset_test, batch_size=16, collate_fn=collate_sentences)
dataset_dev = PropagandaDataset('train', dev_txt, dev_lbl, dev_vocab)
dev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=collate_sentences)
pickle.dump(train_loader, open("savedata/train_loaded.txt", "wb"))
pickle.dump(test_loader, open("savedata/test_loaded.txt", "wb"))
pickle.dump(dev_loader, open("savedata/dev_loaded.txt", "wb"))
train_loader = pickle.load(open("savedata/train_loaded.txt", "rb"))
test_loader = pickle.load(open("savedata/test_loaded.txt", "rb"))
dev_loader = pickle.load(open("savedata/dev_loaded.txt", "rb"))
"""### model"""
############################## PARAMETERS ######################################
_hyperparameters_dict = {
"batch_size": 64,
"num_epochs": 10, # 10,
"max_len": 250,
"embedding_size": 128, # 256,
"rnn_size": 256, # 1024,
"learning_algo": "adam",
"learning_rate": 0.001,
"max_grad_norm": 5.0
}
class RNN(nn.Module):
def __init__(self, vocab_size: int, char_embedding_size: int,
rnn_size: int):
super().__init__()
self.vocab_size = vocab_size
self.char_embedding_size = char_embedding_size
self.rnn_size = rnn_size
self.dropout = nn.Dropout(p=0.3)
# instantiate Modules with the correct arguments
self.embedding = nn.Embedding(num_embeddings=vocab_size,
embedding_dim=char_embedding_size)
self.rnn = nn.LSTM(input_size=char_embedding_size,
hidden_size=rnn_size, bidirectional=True)
# self.rnn_cell = nn.GRUCell(input_size = char_embedding_size,
# hidden_size = rnn_size)
self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)
# self.softmax = nn.Softmax(dim = 2)
self.loss = nn.CrossEntropyLoss()
def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):
"""
Computes loss for a batch of sequences. The sequence loss is the
average of the individual losses at each timestep. The batch loss is
the average of sequence losses across all batches.
:param logits: unnormalized probabilities for T timesteps, size
batch_size x max_timesteps x vocab_size
:param y: ground truth values (index of correct characters), size
batch_size x max_timesteps
:returns: loss as a scalar
"""
#
# logits: B x T x vocab_size
# B x T
# cross entropy: B x vocab_size x T
# B x T
# vision: B x num_classes
# B
return self.loss(logits, y)
def get_logits(self, hidden_states: torch.FloatTensor,
temperature: float = 1.0):
"""
Computes the unnormalized probabilities from hidden states. Optionally
divide logits by a temperature, in order to influence predictions at
test time (https://www.quora.com/What-is-Temperature-in-LSTM)
:param hidden_states: tensor of size batch_size x timesteps x rnn_size
:param temperature: coefficient that scales outputs before turning them
to probabilities. A low temperature (0.1) results in more conservative
predictions, while a higher temperature (0.9) results in more diverse
predictions
:return: tensor of size batch_size x timesteps x vocab_size
"""
return self.logits(hidden_states) / temperature
def forward(self, batch: torch.LongTensor,
hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:
"""
Computes the hidden states for the current batch (x, y).
:param x: input of size batch_size x max_len
:param hidden_start: hidden state at time step t = 0,
size batch_size x rnn_size
:return: hidden states at all timesteps,
size batch_size x timesteps x rnn_size
"""
# max_len = x.size(1)
# x,label = batch
# batch_size x max_len x embedding_dim
x_embedded = self.embedding(batch)
# x_drop = self.dropout
x_drop = self.dropout(x_embedded)
# compute hidden states and logits for each time step
# hidden_states_list = []
# prev_hidden = hidden_start
hidden_state = self.rnn(x_drop)[0]
# print(hidden_state)
# print(hidden_state[0].shape)
# print(hidden_state[1].shape)
# hidden_state = hidden_state.permute(2,1,0)
# hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])
# hidden_state_maxPooled = hidden_state.permute(2,1,0)
hidden_state_pooled, _ = torch.max(hidden_state, dim=1)
output = self.get_logits(hidden_state_pooled)
# Loss = self.loss(output, y)
# hidden_state = softmax(logits(hidden_state))
# batch_size x max_len x rnn_size
# hidden_states = torch.stack(hidden_states_list, dim=1)
return output
# instantiate the RNNLM module
network = RNN(vocab.size(),
_hyperparameters_dict['embedding_size'],
_hyperparameters_dict['rnn_size'])
# if torch.cuda.is_available():
# device = torch.device('cuda:0')
# else:
# device = torch.device('cpu')
# move network to GPU if available
# network = network.to(device)
# device = torch.device('cpu')
# network = network.to(device)
optimizer = Adam(params=network.parameters(), lr=0.001)
# CHECKPOINT: make sure you understand each parameter size
print("Neural network parameters: ")
for param_name, param in network.named_parameters():
print("\t" + param_name, " size: ", param.size())
"""# Training/evaluation loop"""
# Commented out IPython magic to ensure Python compatibility.
class Trainer:
def __init__(self, model: nn.Module,
train_data: torch.LongTensor,
dev_data: torch.LongTensor,
vocab: Vocabulary,
hyperparams: Dict):
self.model = model
self.train_data = train_data
self.dev_data = dev_data
self.vocab = vocab
# self.device = torch.device('cuda:0')
if hyperparams['learning_algo'] == 'adam':
self.optimizer = Adam(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
else:
self.optimizer = SGD(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
self.num_epochs = hyperparams['num_epochs']
self.max_len = hyperparams['max_len']
self.batch_size = hyperparams['batch_size']
self.rnn_size = hyperparams['rnn_size']
self.max_grad_norm = hyperparams['max_grad_norm']
# number of characters in training/dev data
self.train_size = len(train_data)
self.dev_size = len(dev_data)
# number of sequences (X, Y) used for training
self.num_train_examples = \
self.train_size // (self.batch_size * self.max_len) * self.batch_size
def train_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the training set
:param epoch_num: number of current epoch
"""
self.model.train()
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size)
# for batch_num, (x, y) in enumerate(make_batches(self.train_data,
# self.batch_size,
# self.max_len)):
for batch_num, batch_tuple in enumerate(self.train_data):
print('batch: ', batch_num)
# reset gradients in train epoch
self.optimizer.zero_grad()
x = len(batch_tuple[0])
y = len(batch_tuple[0][0])
# compute hidden states
# batch x timesteps x hidden_size
x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# compute unnormalized probabilities
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
# compute loss
# scalar
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
# backpropagation (gradient of loss wrt parameters)
batch_loss.backward()
# clip gradients if they get too large
torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),
self.max_grad_norm)
# update parameters
self.optimizer.step()
# we use a stateful RNN, which means the first hidden state for the
# next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:] # add comment
if batch_num % 100 == 0:
print("epoch %d, %d/%d examples, batch loss = %f"
% (epoch_num, (batch_num + 1) * self.batch_size,
self.num_train_examples, batch_loss.item()))
epoch_loss /= (batch_num + 1)
return epoch_loss
def eval_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the validation set
:param epoch_num: number of current epoch
"""
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size).to(device)
with torch.no_grad():
# for batch_num, (x, y) in enumerate(make_batches(self.dev_data,
# self.batch_size,
# self.max_len)):
acc = 0;
for batch_num, batch_tuple in enumerate(self.train_data):
print('batch: ', batch_num)
# reset gradients
# self.optimizer.zero_grad()
# x = len(batch_tuple[0])
# y = len(batch_tuple[0][0])
# batch x timesteps x hidden_size
x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
hidden_states_m = torch.argmax(hidden_states, dim=1)
acc += sum(hidden_states_m == y).item()
# we use a stateful RNN, which means the first hidden state for
# the next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:]
epoch_loss /= (batch_num + 1)
return epoch_loss, acc
def train(self) -> Dict:
train_losses, dev_losses, dev_acc = [], [], []
for epoch in range(self.num_epochs):
epoch_train_loss = self.train_epoch(epoch)
epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)
train_losses.append(epoch_train_loss)
dev_losses.append(epoch_dev_loss)
dev_acc.append(epoch_dev_train)
return {"train_losses": train_losses,
"dev_losses": dev_losses,
"dev_acc": epoch_dev_train}
def plot_losses(metrics: Dict):
"""
Plots training/validation losses.
:param metrics: dictionar
"""
plt.figure()
plt.plot(metrics['train_losses'], c='b', label='Train')
plt.plot(metrics['dev_losses'], c='g', label='Valid')
plt.ylabel('Loss')
plt.xlabel('Iteration')
plt.legend()
plt.show()
# op= torch.rand(4)
# thx = torch.rand(4)
# thx[0] = op[0]
# t = thx==op
# print(t)
# print(sum(t).item())
# train network for some epoch
trainer = Trainer(network, train_loader, dev_loader, vocab, _hyperparameters_dict)
metrics = trainer.train()
# plot training and validations losses each epoch
plot_losses(metrics)
# for i in train_loader:
# print(len(i[0][0]))
# print(len(i[0]))
# print(i[0])
# x = 1
# while (True)
# x = 0
| fill_vocab | identifier_name |
PropDetCode.py | # -*- coding: utf-8 -*-
import pickle
import pathlib
from pathlib import Path
from typing import List, Tuple, Dict
import numpy as np
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.utils.data import Dataset, DataLoader
from torchtext.data import get_tokenizer
from matplotlib import pyplot as plt
"""### **Preprocesare**"""
def read_data(directory):
ids = []
texts = []
labels = []
for f in directory.glob('*.txt'):
id = f.name.replace('article', '').replace('.txt', '')
ids.append(id)
texts.append(f.read_text('utf8'))
labels.append(parse_label(f.as_posix().replace('.txt', '.labels.tsv')))
# labels can be empty
return ids, texts, labels
def parse_label(label_path):
labels = []
f = Path(label_path)
if not f.exists():
return labels
for line in open(label_path):
parts = line.strip().split('\t')
labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])
labels = sorted(labels)
if labels:
length = max([label[1] for label in labels])
visit = np.zeros(length)
res = []
for label in labels:
if sum(visit[label[0]:label[1]]):
|
else:
visit[label[0]:label[1]] = 1
res.append(label)
return res
else:
return labels
def clean_text(articles, ids):
texts = []
for article, id in zip(articles, ids):
sentences = article.split('\n')
end = -1
res = []
for sentence in sentences:
start = end + 1
end = start + len(sentence) # length of sequence
if sentence != "": # if not empty line
res.append([id, sentence, start, end])
texts.append(res)
return texts
def make_dataset(texts, lbls):
txt = []
lbl = []
for text, label in zip(texts, lbls):
for Text in text:
txt.append(Text[1])
k = 0
for l in label:
if Text[2] < l[0] < Text[3]:
lbl.append(1)
k = 1
break
elif Text[2] < l[1] < Text[3]:
lbl.append(1)
k = 1
break
if k == 0:
lbl.append(0)
return txt, lbl
directory = pathlib.Path('data/protechn_corpus_eval/train')
ids, texts,lbl = read_data(directory)
ids_train = ids
texts_train = texts
lbl_train = lbl
directory = pathlib.Path('data/protechn_corpus_eval/test')
ids_test, texts_test,lbl_test = read_data(directory)
directory = pathlib.Path('data/protechn_corpus_eval/dev')
ids_dev, texts_dev,lbl_dev = read_data(directory)
txt_train = clean_text(texts_train, ids_train)
txt_test = clean_text(texts_test, ids_test)
txt_dev =clean_text(texts_dev, ids_dev)
train_txt, train_lbl = make_dataset(txt_train, lbl_train)
test_txt, test_lbl = make_dataset(txt_test, lbl_test)
dev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)
pickle.dump([dev_txt,dev_lbl], open("savedata/dev.txt", "wb"))
pickle.dump([test_txt,test_lbl], open("savedata/test.txt", "wb"))
pickle.dump([train_txt,train_lbl], open("savedata/train.txt", "wb"))
train_txt, train_lbl = pickle.load(open("savedata/train.txt", "rb"))
test_txt, test_lbl = pickle.load(open("savedata/test.txt", "rb"))
dev_txt, dev_lbl = pickle.load(open("savedata/dev.txt", "rb"))
"""### **Dataset+ data_loader**"""
class Vocabulary:
"""
Helper class that maps words to unique indices and the other way around
"""
def __init__(self, tokens: List[str]):
# dictionary that maps words to indices
self.word_to_idx = {'<PAD>': 0}
for idx, tok in enumerate(tokens, 1):
self.word_to_idx[tok] = idx
# dictionary that maps indices to words
self.idx_to_word = {}
for tok, idx in self.word_to_idx.items():
self.idx_to_word[idx] = tok
def get_token_at_index(self, idx: int):
return self.idx_to_word[idx]
def get_index_of_token(self, token: str):
return self.word_to_idx[token]
def size(self):
return len(self.word_to_idx)
class PropagandaDataset(Dataset):
def __init__(self,
fold: str,
examples: List[str],
labels: List[int],
vocab: Vocabulary):
"""
:type vocab: object
:param fold: 'train'/'eval'/'test'
:param examples: List of sentences/paragraphs
:param labels: List of labels (1 if propaganda, 0 otherwise)
"""
self.fold = fold
self.examples = examples
self.labels = labels
self.vocab = vocab
def __getitem__(self, index: int) -> (torch.Tensor, torch.Tensor):
"""
This function converts an example to a Tensor containing the indices
:param index: position of example to be retrieved.
"""
# retrieve sentence and label (correct class index)
example, label = self.examples[index], self.labels[index]
# tokenize sentence into words and other symbols
tokenizer = get_tokenizer("spacy")
tokens = tokenizer(example)
# convert tokens to their corresponding indices, according to
# vocabulary
token_indices = []
for i in tokens:
token_indices.append(self.vocab.get_index_of_token(i))
return torch.LongTensor(token_indices), torch.LongTensor(label)
def __len__(self):
"""
Return the size of this dataset. This is given by the number
of sentences.
"""
return len(self.examples)
def collate_sentences(batch: List[Tuple]):
"""
This function converts a list of batch_size examples to
a Tensor of size batch_size x max_len
batch: [(example_1_tensor, example_1_label),
...
(example_batch_size_tensor, example_batch_size_label)]
"""
# fill this list with all the labels in the batch
batch_labels = []
# we need to find the maximum length of a sentence in this batch
max_len = 0
for i in batch:
if len(i[0]) > max_len:
max_len = len(i[0])
batch_size = len(batch)
# print('batch size',batch_size)
# initialize a Tensor filled with zeros (aka index of <PAD>)
batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)
# fill each row idx in batch_sentences with the corresponding
# sequence tensor
#
# ... batch_sentences[idx, ...] = ...
for idx in range(0, batch_size):
# print(idx)
# print(len(batch[idx][0]))
# print(len(batch_sentences[idx]))
batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]
print(batch[idx])
batch_labels.append(batch[idx][1])
# print(batch_sentences[idx])
print(type(batch_labels))
# batch_labels = [torch.LongTensor(x) for x in batch_labels]
batch_labels = torch.tensor(batch_labels)
# print(batch_labels)
return batch_sentences, batch_labels
def fill_vocab(txt: List[Tuple]):
tokenizer = get_tokenizer("spacy")
list_v = []
for i in txt:
tok = tokenizer(i)
for j in tok:
if list_v.count(j) == 0:
list_v.append(j)
vocab = Vocabulary(tokens=list_v)
return vocab
full_text = train_txt + dev_txt
vocab = fill_vocab(full_text)
test_vocab = fill_vocab(test_txt)
train_vocab = fill_vocab(train_txt)
dev_vocab = fill_vocab(dev_txt)
pickle.dump(dev_vocab, open("savedata/dev_vocab.txt", "wb"))
pickle.dump(test_vocab, open("savedata/test_vocab.txt", "wb"))
pickle.dump(train_vocab, open("savedata/train_vocab.txt", "wb"))
pickle.dump(vocab, open("savedata/vocab.txt", "wb"))
dev_vocab = pickle.load(open("savedata/dev_vocab.txt","rb"))
test_vocab = pickle.load(open("savedata/test_vocab.txt","rb"))
train_vocab = pickle.load(open("savedata/train_vocab.txt","rb"))
vocab = pickle.load(open("savedata/vocab.txt", "rb"))
dataset_train = PropagandaDataset('train', train_txt, train_lbl, train_vocab)
train_loader = DataLoader(dataset_train, batch_size=16, collate_fn=collate_sentences)
dataset_test = PropagandaDataset('train', test_txt, test_lbl, test_vocab)
test_loader = DataLoader(dataset_test, batch_size=16, collate_fn=collate_sentences)
dataset_dev = PropagandaDataset('train', dev_txt, dev_lbl, dev_vocab)
dev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=collate_sentences)
pickle.dump(train_loader, open("savedata/train_loaded.txt", "wb"))
pickle.dump(test_loader, open("savedata/test_loaded.txt", "wb"))
pickle.dump(dev_loader, open("savedata/dev_loaded.txt", "wb"))
train_loader = pickle.load(open("savedata/train_loaded.txt", "rb"))
test_loader = pickle.load(open("savedata/test_loaded.txt", "rb"))
dev_loader = pickle.load(open("savedata/dev_loaded.txt", "rb"))
"""### model"""
############################## PARAMETERS ######################################
_hyperparameters_dict = {
"batch_size": 64,
"num_epochs": 10, # 10,
"max_len": 250,
"embedding_size": 128, # 256,
"rnn_size": 256, # 1024,
"learning_algo": "adam",
"learning_rate": 0.001,
"max_grad_norm": 5.0
}
class RNN(nn.Module):
def __init__(self, vocab_size: int, char_embedding_size: int,
rnn_size: int):
super().__init__()
self.vocab_size = vocab_size
self.char_embedding_size = char_embedding_size
self.rnn_size = rnn_size
self.dropout = nn.Dropout(p=0.3)
# instantiate Modules with the correct arguments
self.embedding = nn.Embedding(num_embeddings=vocab_size,
embedding_dim=char_embedding_size)
self.rnn = nn.LSTM(input_size=char_embedding_size,
hidden_size=rnn_size, bidirectional=True)
# self.rnn_cell = nn.GRUCell(input_size = char_embedding_size,
# hidden_size = rnn_size)
self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)
# self.softmax = nn.Softmax(dim = 2)
self.loss = nn.CrossEntropyLoss()
def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):
"""
Computes loss for a batch of sequences. The sequence loss is the
average of the individual losses at each timestep. The batch loss is
the average of sequence losses across all batches.
:param logits: unnormalized probabilities for T timesteps, size
batch_size x max_timesteps x vocab_size
:param y: ground truth values (index of correct characters), size
batch_size x max_timesteps
:returns: loss as a scalar
"""
#
# logits: B x T x vocab_size
# B x T
# cross entropy: B x vocab_size x T
# B x T
# vision: B x num_classes
# B
return self.loss(logits, y)
def get_logits(self, hidden_states: torch.FloatTensor,
temperature: float = 1.0):
"""
Computes the unnormalized probabilities from hidden states. Optionally
divide logits by a temperature, in order to influence predictions at
test time (https://www.quora.com/What-is-Temperature-in-LSTM)
:param hidden_states: tensor of size batch_size x timesteps x rnn_size
:param temperature: coefficient that scales outputs before turning them
to probabilities. A low temperature (0.1) results in more conservative
predictions, while a higher temperature (0.9) results in more diverse
predictions
:return: tensor of size batch_size x timesteps x vocab_size
"""
return self.logits(hidden_states) / temperature
def forward(self, batch: torch.LongTensor,
hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:
"""
Computes the hidden states for the current batch (x, y).
:param x: input of size batch_size x max_len
:param hidden_start: hidden state at time step t = 0,
size batch_size x rnn_size
:return: hidden states at all timesteps,
size batch_size x timesteps x rnn_size
"""
# max_len = x.size(1)
# x,label = batch
# batch_size x max_len x embedding_dim
x_embedded = self.embedding(batch)
# x_drop = self.dropout
x_drop = self.dropout(x_embedded)
# compute hidden states and logits for each time step
# hidden_states_list = []
# prev_hidden = hidden_start
hidden_state = self.rnn(x_drop)[0]
# print(hidden_state)
# print(hidden_state[0].shape)
# print(hidden_state[1].shape)
# hidden_state = hidden_state.permute(2,1,0)
# hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])
# hidden_state_maxPooled = hidden_state.permute(2,1,0)
hidden_state_pooled, _ = torch.max(hidden_state, dim=1)
output = self.get_logits(hidden_state_pooled)
# Loss = self.loss(output, y)
# hidden_state = softmax(logits(hidden_state))
# batch_size x max_len x rnn_size
# hidden_states = torch.stack(hidden_states_list, dim=1)
return output
# instantiate the RNNLM module
network = RNN(vocab.size(),
_hyperparameters_dict['embedding_size'],
_hyperparameters_dict['rnn_size'])
# if torch.cuda.is_available():
# device = torch.device('cuda:0')
# else:
# device = torch.device('cpu')
# move network to GPU if available
# network = network.to(device)
# device = torch.device('cpu')
# network = network.to(device)
optimizer = Adam(params=network.parameters(), lr=0.001)
# CHECKPOINT: make sure you understand each parameter size
print("Neural network parameters: ")
for param_name, param in network.named_parameters():
print("\t" + param_name, " size: ", param.size())
"""# Training/evaluation loop"""
# Commented out IPython magic to ensure Python compatibility.
class Trainer:
    """Training/evaluation driver for a sentence classifier.

    ``train_data`` and ``dev_data`` are iterables of (x, y) batches
    (e.g. a DataLoader): x is a batch_size x max_len LongTensor of
    token indices, y a 1-D LongTensor of class indices.
    """

    def __init__(self, model: nn.Module,
                 train_data: torch.LongTensor,
                 dev_data: torch.LongTensor,
                 vocab: Vocabulary,
                 hyperparams: Dict):
        self.model = model
        self.train_data = train_data
        self.dev_data = dev_data
        self.vocab = vocab
        # choose the optimizer; anything other than 'adam' falls back to SGD
        if hyperparams['learning_algo'] == 'adam':
            self.optimizer = Adam(params=self.model.parameters(),
                                  lr=hyperparams['learning_rate'])
        else:
            self.optimizer = SGD(params=self.model.parameters(),
                                 lr=hyperparams['learning_rate'])
        self.num_epochs = hyperparams['num_epochs']
        self.max_len = hyperparams['max_len']
        self.batch_size = hyperparams['batch_size']
        self.rnn_size = hyperparams['rnn_size']
        self.max_grad_norm = hyperparams['max_grad_norm']
        # number of batches in training/dev data
        self.train_size = len(train_data)
        self.dev_size = len(dev_data)
        # NOTE(review): this formula was written for character-level
        # batching; with a DataLoader it only feeds the progress message
        # below -- confirm before relying on it.
        self.num_train_examples = \
            self.train_size // (self.batch_size * self.max_len) * self.batch_size

    def train_epoch(self, epoch_num: int) -> float:
        """Run one optimization pass over train_data; return the mean batch loss.

        :param epoch_num: number of the current epoch (for logging only)
        """
        self.model.train()
        epoch_loss = 0.0
        num_batches = 0
        # (per-batch debug prints removed)
        for batch_num, (x, y) in enumerate(self.train_data):
            # reset gradients accumulated by the previous step
            self.optimizer.zero_grad()
            # class logits: batch_size x 2
            logits = self.model(x)
            batch_loss = self.model.get_loss(logits, y)
            epoch_loss += batch_loss.item()
            # backpropagation (gradient of loss wrt parameters)
            batch_loss.backward()
            # clip gradients if they get too large
            torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),
                                           self.max_grad_norm)
            self.optimizer.step()
            num_batches += 1
            if batch_num % 100 == 0:
                print("epoch %d, %d/%d examples, batch loss = %f"
                      % (epoch_num, (batch_num + 1) * self.batch_size,
                         self.num_train_examples, batch_loss.item()))
        return epoch_loss / max(num_batches, 1)

    def eval_epoch(self, epoch_num: int) -> Tuple[float, int]:
        """Compute mean loss and number of correct predictions on dev_data.

        Bug fix: this previously iterated over ``self.train_data``, so the
        reported "validation" metrics were really training metrics.

        :param epoch_num: number of the current epoch (unused, kept for symmetry)
        :return: (mean batch loss, count of correctly classified examples)
        """
        # disable dropout etc. during evaluation
        self.model.eval()
        epoch_loss = 0.0
        correct = 0
        num_batches = 0
        with torch.no_grad():
            for batch_num, (x, y) in enumerate(self.dev_data):
                logits = self.model(x)
                batch_loss = self.model.get_loss(logits, y)
                epoch_loss += batch_loss.item()
                # predicted class = argmax over the 2 logits
                predictions = torch.argmax(logits, dim=1)
                correct += (predictions == y).sum().item()
                num_batches += 1
        return epoch_loss / max(num_batches, 1), correct

    def train(self) -> Dict:
        """Train for num_epochs epochs.

        :return: dict with per-epoch 'train_losses', 'dev_losses' and
            'dev_acc' lists (dev_acc holds correct-prediction counts).
        """
        train_losses, dev_losses, dev_acc = [], [], []
        for epoch in range(self.num_epochs):
            epoch_train_loss = self.train_epoch(epoch)
            epoch_dev_loss, epoch_dev_correct = self.eval_epoch(epoch)
            train_losses.append(epoch_train_loss)
            dev_losses.append(epoch_dev_loss)
            dev_acc.append(epoch_dev_correct)
        # Bug fix: 'dev_acc' previously exposed only the last epoch's
        # count while the accumulated list was thrown away.
        return {"train_losses": train_losses,
                "dev_losses": dev_losses,
                "dev_acc": dev_acc}
def plot_losses(metrics: Dict):
    """Plot training and validation loss curves.

    :param metrics: dict with 'train_losses' and 'dev_losses' lists
        (one entry per epoch), as returned by Trainer.train().
    """
    plt.figure()
    plt.plot(metrics['train_losses'], c='b', label='Train')
    plt.plot(metrics['dev_losses'], c='g', label='Valid')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
# op= torch.rand(4)
# thx = torch.rand(4)
# thx[0] = op[0]
# t = thx==op
# print(t)
# print(sum(t).item())
# train network for some epoch
trainer = Trainer(network, train_loader, dev_loader, vocab, _hyperparameters_dict)
metrics = trainer.train()
# plot training and validations losses each epoch
plot_losses(metrics)
# for i in train_loader:
# print(len(i[0][0]))
# print(len(i[0]))
# print(i[0])
# x = 1
# while (True)
# x = 0
| label[3] = 1 | conditional_block |
PropDetCode.py | # -*- coding: utf-8 -*-
import pickle
import pathlib
from pathlib import Path
from typing import List, Tuple, Dict
import numpy as np
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.utils.data import Dataset, DataLoader
from torchtext.data import get_tokenizer
from matplotlib import pyplot as plt
"""### **Preprocesare**"""
def read_data(directory):
    """Load every 'article*.txt' under ``directory``.

    :param directory: pathlib.Path of a corpus split folder
    :return: (ids, texts, labels) -- parallel lists; labels come from the
        matching '.labels.tsv' file via parse_label and may be empty.
    """
    ids, texts, labels = [], [], []
    for article_path in directory.glob('*.txt'):
        # article id is the numeric part of the file name
        article_id = article_path.name.replace('article', '').replace('.txt', '')
        ids.append(article_id)
        texts.append(article_path.read_text('utf8'))
        annotation_path = article_path.as_posix().replace('.txt', '.labels.tsv')
        labels.append(parse_label(annotation_path))
    return ids, texts, labels
def parse_label(label_path):
    """Parse a '.labels.tsv' annotation file into span records.

    Each input row is: article_id<TAB>technique<TAB>start<TAB>end.
    Returns records ``[start, end, technique, overlapped_flag, 0]`` sorted
    by start offset, keeping only spans that do not overlap an earlier
    kept span.

    :param label_path: path to the annotation file (may not exist)
    :return: list of kept span records; [] if the file is missing or empty
    """
    label_file = Path(label_path)
    if not label_file.exists():
        return []
    labels = []
    # Fix: read the file in a context manager so the handle is closed
    # (the previous code leaked one open file per article).
    with open(label_path) as fh:
        for line in fh:
            parts = line.strip().split('\t')
            labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])
    labels.sort()
    if not labels:
        return labels
    # visit[i] == 1 marks character i as covered by an already-kept span
    visit = np.zeros(max(label[1] for label in labels))
    res = []
    for label in labels:
        if visit[label[0]:label[1]].sum():
            # Overlaps a kept span: flag it, but drop it from the result.
            # NOTE(review): the flag is set on a record that is then
            # discarded -- confirm overlapping spans really should be dropped.
            label[3] = 1
        else:
            visit[label[0]:label[1]] = 1
            res.append(label)
    return res
def clean_text(articles, ids):
    """Split each article into lines with character-offset bookkeeping.

    :param articles: list of raw article strings
    :param ids: parallel list of article ids
    :return: per-article lists of [id, line, start_offset, end_offset],
        with empty lines skipped (offsets still account for them).
    """
    per_article = []
    for body, article_id in zip(articles, ids):
        kept = []
        offset_end = -1
        for line in body.split('\n'):
            # +1 skips the newline that terminated the previous line
            offset_start = offset_end + 1
            offset_end = offset_start + len(line)
            if line != "":
                kept.append([article_id, line, offset_start, offset_end])
        per_article.append(kept)
    return per_article
def make_dataset(texts, lbls):
    """Flatten per-article sentences into parallel (sentence, label) lists.

    A sentence is labeled 1 when its character range overlaps any
    annotated propaganda span, else 0.

    Bug fix: the previous check (``Text[2] < l[0] < Text[3]`` or the same
    for ``l[1]``) only caught spans whose start or end fell strictly
    inside the sentence, missing spans that cover the whole sentence or
    share its start offset. Replaced with a proper interval-overlap test.

    :param texts: output of clean_text -- per-article [id, sentence, start, end]
    :param lbls: output of read_data/parse_label -- per-article span records
    :return: (list of sentences, list of 0/1 labels)
    """
    txt = []
    lbl = []
    for sentences, spans in zip(texts, lbls):
        for entry in sentences:
            sentence, start, end = entry[1], entry[2], entry[3]
            txt.append(sentence)
            is_prop = 0
            for span in spans:
                # intervals [start, end) and [span[0], span[1]) overlap
                if span[0] < end and span[1] > start:
                    is_prop = 1
                    break
            lbl.append(is_prop)
    return txt, lbl
# --- read raw articles + span annotations for each split ---
directory = pathlib.Path('data/protechn_corpus_eval/train')
ids, texts, lbl = read_data(directory)
ids_train = ids
texts_train = texts
lbl_train = lbl
directory = pathlib.Path('data/protechn_corpus_eval/test')
ids_test, texts_test, lbl_test = read_data(directory)
directory = pathlib.Path('data/protechn_corpus_eval/dev')
ids_dev, texts_dev, lbl_dev = read_data(directory)
# --- split articles into offset-annotated sentences ---
txt_train = clean_text(texts_train, ids_train)
txt_test = clean_text(texts_test, ids_test)
txt_dev = clean_text(texts_dev, ids_dev)
# --- flatten to parallel (sentence, binary label) lists ---
train_txt, train_lbl = make_dataset(txt_train, lbl_train)
test_txt, test_lbl = make_dataset(txt_test, lbl_test)
dev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)
# Cache the processed splits. Fix: use context managers so each file is
# flushed and closed (the previous bare open() calls were never closed).
with open("savedata/dev.txt", "wb") as fh:
    pickle.dump([dev_txt, dev_lbl], fh)
with open("savedata/test.txt", "wb") as fh:
    pickle.dump([test_txt, test_lbl], fh)
with open("savedata/train.txt", "wb") as fh:
    pickle.dump([train_txt, train_lbl], fh)
# Reload immediately (lets later runs start from this point).
with open("savedata/train.txt", "rb") as fh:
    train_txt, train_lbl = pickle.load(fh)
with open("savedata/test.txt", "rb") as fh:
    test_txt, test_lbl = pickle.load(fh)
with open("savedata/dev.txt", "rb") as fh:
    dev_txt, dev_lbl = pickle.load(fh)
"""### **Dataset+ data_loader**"""
class Vocabulary:
    """Bidirectional word <-> index mapping; index 0 is reserved for '<PAD>'."""

    def __init__(self, tokens: List[str]):
        # word -> index, with '<PAD>' fixed at index 0
        self.word_to_idx = {'<PAD>': 0}
        for position, word in enumerate(tokens, start=1):
            self.word_to_idx[word] = position
        # inverse mapping: index -> word
        self.idx_to_word = {position: word
                            for word, position in self.word_to_idx.items()}

    def get_token_at_index(self, idx: int):
        """Return the word stored at index ``idx``."""
        return self.idx_to_word[idx]

    def get_index_of_token(self, token: str):
        """Return the index assigned to ``token``."""
        return self.word_to_idx[token]

    def size(self):
        """Number of entries, including '<PAD>'."""
        return len(self.word_to_idx)
class PropagandaDataset(Dataset):
    """Sentence-level propaganda dataset.

    Each item is (1-D LongTensor of vocabulary indices, scalar long
    tensor holding the 0/1 label).
    """

    def __init__(self,
                 fold: str,
                 examples: List[str],
                 labels: List[int],
                 vocab: Vocabulary):
        """
        :type vocab: object
        :param fold: 'train'/'eval'/'test'
        :param examples: List of sentences/paragraphs
        :param labels: List of labels (1 if propaganda, 0 otherwise)
        """
        self.fold = fold
        self.examples = examples
        self.labels = labels
        self.vocab = vocab

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Convert the example at ``index`` to an index tensor plus its label.

        :param index: position of example to be retrieved.
        """
        example, label = self.examples[index], self.labels[index]
        # NOTE(review): the tokenizer is rebuilt on every access; kept here
        # (rather than __init__) so the dataset object stays picklable.
        tokenizer = get_tokenizer("spacy")
        tokens = tokenizer(example)
        # map tokens to their vocabulary indices
        token_indices = [self.vocab.get_index_of_token(tok) for tok in tokens]
        # Bug fix: torch.LongTensor(label) with an int `label` allocates an
        # *uninitialized* tensor of size `label` (empty for label 0, garbage
        # for label 1). Wrap the scalar value instead.
        return torch.LongTensor(token_indices), torch.tensor(label, dtype=torch.long)

    def __len__(self):
        """Size of this dataset: the number of sentences."""
        return len(self.examples)
def collate_sentences(batch: List[Tuple]):
    """Pad a list of (token_tensor, label) pairs into batch tensors.

    :param batch: [(example_1_tensor, example_1_label), ...,
                   (example_n_tensor, example_n_label)]
    :return: (batch_size x max_len LongTensor, right-padded with 0 --
              the index of '<PAD>' -- and a 1-D tensor of labels)
    """
    # the longest sequence in this batch determines the padded width
    max_len = max((len(seq) for seq, _ in batch), default=0)
    batch_size = len(batch)
    # a tensor pre-filled with 0 (the <PAD> index)
    batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)
    batch_labels = []
    for row, (seq, label) in enumerate(batch):
        batch_sentences[row, :len(seq)] = seq
        batch_labels.append(label)
    # (debug prints that dumped every example to stdout were removed)
    return batch_sentences, torch.tensor(batch_labels)
def fill_vocab(txt: List[Tuple]):
    """Build a Vocabulary from every distinct token in ``txt``.

    Tokens keep their order of first appearance. Fix: membership is now
    tracked with a set instead of ``list.count()``, turning the previous
    O(n^2) scan into O(n).

    :param txt: iterable of sentences
    :return: Vocabulary covering every distinct token
    """
    tokenizer = get_tokenizer("spacy")
    seen = set()
    ordered_tokens = []
    for sentence in txt:
        for tok in tokenizer(sentence):
            if tok not in seen:
                seen.add(tok)
                ordered_tokens.append(tok)
    return Vocabulary(tokens=ordered_tokens)
# Build vocabularies: a shared one over train+dev plus per-split ones.
# NOTE(review): per-split vocabularies give each split a different index
# space while the model is sized from `vocab` -- confirm this is intended.
full_text = train_txt + dev_txt
vocab = fill_vocab(full_text)
test_vocab = fill_vocab(test_txt)
train_vocab = fill_vocab(train_txt)
dev_vocab = fill_vocab(dev_txt)
# Cache the vocabularies. Fix: context managers flush and close the
# files (the previous bare open() calls were never closed).
with open("savedata/dev_vocab.txt", "wb") as fh:
    pickle.dump(dev_vocab, fh)
with open("savedata/test_vocab.txt", "wb") as fh:
    pickle.dump(test_vocab, fh)
with open("savedata/train_vocab.txt", "wb") as fh:
    pickle.dump(train_vocab, fh)
with open("savedata/vocab.txt", "wb") as fh:
    pickle.dump(vocab, fh)
with open("savedata/dev_vocab.txt", "rb") as fh:
    dev_vocab = pickle.load(fh)
with open("savedata/test_vocab.txt", "rb") as fh:
    test_vocab = pickle.load(fh)
with open("savedata/train_vocab.txt", "rb") as fh:
    train_vocab = pickle.load(fh)
with open("savedata/vocab.txt", "rb") as fh:
    vocab = pickle.load(fh)
# Wrap each split in a Dataset + DataLoader.
# Fix: the fold tag previously said 'train' for all three datasets; the
# attribute is only stored on the dataset, so correcting it is safe.
dataset_train = PropagandaDataset('train', train_txt, train_lbl, train_vocab)
train_loader = DataLoader(dataset_train, batch_size=16, collate_fn=collate_sentences)
dataset_test = PropagandaDataset('test', test_txt, test_lbl, test_vocab)
test_loader = DataLoader(dataset_test, batch_size=16, collate_fn=collate_sentences)
dataset_dev = PropagandaDataset('dev', dev_txt, dev_lbl, dev_vocab)
dev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=collate_sentences)
# Cache the loaders as well.
with open("savedata/train_loaded.txt", "wb") as fh:
    pickle.dump(train_loader, fh)
with open("savedata/test_loaded.txt", "wb") as fh:
    pickle.dump(test_loader, fh)
with open("savedata/dev_loaded.txt", "wb") as fh:
    pickle.dump(dev_loader, fh)
with open("savedata/train_loaded.txt", "rb") as fh:
    train_loader = pickle.load(fh)
with open("savedata/test_loaded.txt", "rb") as fh:
    test_loader = pickle.load(fh)
with open("savedata/dev_loaded.txt", "rb") as fh:
    dev_loader = pickle.load(fh)
"""### model"""
############################## PARAMETERS ######################################
_hyperparameters_dict = {
    "batch_size": 64,
    "num_epochs": 10,
    "max_len": 250,
    "embedding_size": 128,
    "rnn_size": 256,
    "learning_algo": "adam",  # 'adam' or anything else for SGD
    "learning_rate": 0.001,
    "max_grad_norm": 5.0,     # gradient clipping threshold
}
class RNN(nn.Module):
    """Bidirectional LSTM sentence classifier.

    Token indices are embedded, passed through dropout and a
    bidirectional LSTM, max-pooled over dim 1, and projected to two
    class logits.
    """

    def __init__(self, vocab_size: int, char_embedding_size: int,
                 rnn_size: int):
        super().__init__()
        self.vocab_size = vocab_size
        self.char_embedding_size = char_embedding_size
        self.rnn_size = rnn_size
        self.dropout = nn.Dropout(p=0.3)
        self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=char_embedding_size)
        self.rnn = nn.LSTM(input_size=char_embedding_size,
                           hidden_size=rnn_size, bidirectional=True)
        # bidirectional outputs have 2 * rnn_size features
        self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)
        self.loss = nn.CrossEntropyLoss()

    def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):
        """Cross-entropy between class logits (batch x 2) and targets (batch).

        :param logits: unnormalized class scores
        :param y: ground-truth class indices
        :returns: loss as a scalar tensor
        """
        return self.loss(logits, y)

    def get_logits(self, hidden_states: torch.FloatTensor,
                   temperature: float = 1.0):
        """Project pooled hidden states to unnormalized class scores.

        :param hidden_states: tensor whose last dim is 2 * rnn_size
        :param temperature: scale applied before softmax elsewhere; < 1
            sharpens predictions, > 1 flattens them
        :return: tensor of class scores
        """
        return self.logits(hidden_states) / temperature

    def forward(self, batch: torch.LongTensor,
                hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:
        """Compute class logits for a batch of token-index sequences.

        :param batch: LongTensor of size batch_size x max_len
        :param hidden_start: unused; kept for interface compatibility
        :return: logits of size batch_size x 2
        """
        embedded = self.embedding(batch)           # batch x max_len x emb
        dropped = self.dropout(embedded)
        # nn.LSTM returns (outputs, (h_n, c_n)); keep the outputs.
        # NOTE(review): nn.LSTM defaults to sequence-first input while the
        # data here is batch-first -- batch_first=True may have been
        # intended; confirm before changing (left as-is to preserve behavior).
        rnn_out = self.rnn(dropped)[0]
        # max-pool over dim 1
        pooled, _ = torch.max(rnn_out, dim=1)
        return self.get_logits(pooled)
# Instantiate the classifier, sized from the shared vocabulary.
network = RNN(vocab.size(),
              _hyperparameters_dict['embedding_size'],
              _hyperparameters_dict['rnn_size'])
# (GPU-transfer code was already commented out; training runs on CPU.)
optimizer = Adam(params=network.parameters(), lr=0.001)
# CHECKPOINT: make sure you understand each parameter size
print("Neural network parameters: ")
for param_name, param in network.named_parameters():
    print("\t" + param_name, " size: ", param.size())
"""# Training/evaluation loop"""
# Commented out IPython magic to ensure Python compatibility.
class Trainer:
def __init__(self, model: nn.Module,
train_data: torch.LongTensor, | self.train_data = train_data
self.dev_data = dev_data
self.vocab = vocab
# self.device = torch.device('cuda:0')
if hyperparams['learning_algo'] == 'adam':
self.optimizer = Adam(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
else:
self.optimizer = SGD(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
self.num_epochs = hyperparams['num_epochs']
self.max_len = hyperparams['max_len']
self.batch_size = hyperparams['batch_size']
self.rnn_size = hyperparams['rnn_size']
self.max_grad_norm = hyperparams['max_grad_norm']
# number of characters in training/dev data
self.train_size = len(train_data)
self.dev_size = len(dev_data)
# number of sequences (X, Y) used for training
self.num_train_examples = \
self.train_size // (self.batch_size * self.max_len) * self.batch_size
def train_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the training set
:param epoch_num: number of current epoch
"""
self.model.train()
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size)
# for batch_num, (x, y) in enumerate(make_batches(self.train_data,
# self.batch_size,
# self.max_len)):
for batch_num, batch_tuple in enumerate(self.train_data):
print('batch: ', batch_num)
# reset gradients in train epoch
self.optimizer.zero_grad()
x = len(batch_tuple[0])
y = len(batch_tuple[0][0])
# compute hidden states
# batch x timesteps x hidden_size
x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# compute unnormalized probabilities
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
# compute loss
# scalar
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
# backpropagation (gradient of loss wrt parameters)
batch_loss.backward()
# clip gradients if they get too large
torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),
self.max_grad_norm)
# update parameters
self.optimizer.step()
# we use a stateful RNN, which means the first hidden state for the
# next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:] # add comment
if batch_num % 100 == 0:
print("epoch %d, %d/%d examples, batch loss = %f"
% (epoch_num, (batch_num + 1) * self.batch_size,
self.num_train_examples, batch_loss.item()))
epoch_loss /= (batch_num + 1)
return epoch_loss
def eval_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the validation set
:param epoch_num: number of current epoch
"""
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size).to(device)
with torch.no_grad():
# for batch_num, (x, y) in enumerate(make_batches(self.dev_data,
# self.batch_size,
# self.max_len)):
acc = 0;
for batch_num, batch_tuple in enumerate(self.train_data):
print('batch: ', batch_num)
# reset gradients
# self.optimizer.zero_grad()
# x = len(batch_tuple[0])
# y = len(batch_tuple[0][0])
# batch x timesteps x hidden_size
x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
hidden_states_m = torch.argmax(hidden_states, dim=1)
acc += sum(hidden_states_m == y).item()
# we use a stateful RNN, which means the first hidden state for
# the next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:]
epoch_loss /= (batch_num + 1)
return epoch_loss, acc
def train(self) -> Dict:
train_losses, dev_losses, dev_acc = [], [], []
for epoch in range(self.num_epochs):
epoch_train_loss = self.train_epoch(epoch)
epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)
train_losses.append(epoch_train_loss)
dev_losses.append(epoch_dev_loss)
dev_acc.append(epoch_dev_train)
return {"train_losses": train_losses,
"dev_losses": dev_losses,
"dev_acc": epoch_dev_train}
def plot_losses(metrics: Dict):
"""
Plots training/validation losses.
:param metrics: dictionar
"""
plt.figure()
plt.plot(metrics['train_losses'], c='b', label='Train')
plt.plot(metrics['dev_losses'], c='g', label='Valid')
plt.ylabel('Loss')
plt.xlabel('Iteration')
plt.legend()
plt.show()
# op= torch.rand(4)
# thx = torch.rand(4)
# thx[0] = op[0]
# t = thx==op
# print(t)
# print(sum(t).item())
# train network for some epoch
trainer = Trainer(network, train_loader, dev_loader, vocab, _hyperparameters_dict)
metrics = trainer.train()
# plot training and validations losses each epoch
plot_losses(metrics)
# for i in train_loader:
# print(len(i[0][0]))
# print(len(i[0]))
# print(i[0])
# x = 1
# while (True)
# x = 0 | dev_data: torch.LongTensor,
vocab: Vocabulary,
hyperparams: Dict):
self.model = model | random_line_split |
PropDetCode.py | # -*- coding: utf-8 -*-
import pickle
import pathlib
from pathlib import Path
from typing import List, Tuple, Dict
import numpy as np
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.utils.data import Dataset, DataLoader
from torchtext.data import get_tokenizer
from matplotlib import pyplot as plt
"""### **Preprocesare**"""
def read_data(directory):
ids = []
texts = []
labels = []
for f in directory.glob('*.txt'):
id = f.name.replace('article', '').replace('.txt', '')
ids.append(id)
texts.append(f.read_text('utf8'))
labels.append(parse_label(f.as_posix().replace('.txt', '.labels.tsv')))
# labels can be empty
return ids, texts, labels
def parse_label(label_path):
labels = []
f = Path(label_path)
if not f.exists():
return labels
for line in open(label_path):
parts = line.strip().split('\t')
labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])
labels = sorted(labels)
if labels:
length = max([label[1] for label in labels])
visit = np.zeros(length)
res = []
for label in labels:
if sum(visit[label[0]:label[1]]):
label[3] = 1
else:
visit[label[0]:label[1]] = 1
res.append(label)
return res
else:
return labels
def clean_text(articles, ids):
texts = []
for article, id in zip(articles, ids):
sentences = article.split('\n')
end = -1
res = []
for sentence in sentences:
start = end + 1
end = start + len(sentence) # length of sequence
if sentence != "": # if not empty line
res.append([id, sentence, start, end])
texts.append(res)
return texts
def make_dataset(texts, lbls):
txt = []
lbl = []
for text, label in zip(texts, lbls):
for Text in text:
txt.append(Text[1])
k = 0
for l in label:
if Text[2] < l[0] < Text[3]:
lbl.append(1)
k = 1
break
elif Text[2] < l[1] < Text[3]:
lbl.append(1)
k = 1
break
if k == 0:
lbl.append(0)
return txt, lbl
directory = pathlib.Path('data/protechn_corpus_eval/train')
ids, texts,lbl = read_data(directory)
ids_train = ids
texts_train = texts
lbl_train = lbl
directory = pathlib.Path('data/protechn_corpus_eval/test')
ids_test, texts_test,lbl_test = read_data(directory)
directory = pathlib.Path('data/protechn_corpus_eval/dev')
ids_dev, texts_dev,lbl_dev = read_data(directory)
txt_train = clean_text(texts_train, ids_train)
txt_test = clean_text(texts_test, ids_test)
txt_dev =clean_text(texts_dev, ids_dev)
train_txt, train_lbl = make_dataset(txt_train, lbl_train)
test_txt, test_lbl = make_dataset(txt_test, lbl_test)
dev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)
pickle.dump([dev_txt,dev_lbl], open("savedata/dev.txt", "wb"))
pickle.dump([test_txt,test_lbl], open("savedata/test.txt", "wb"))
pickle.dump([train_txt,train_lbl], open("savedata/train.txt", "wb"))
train_txt, train_lbl = pickle.load(open("savedata/train.txt", "rb"))
test_txt, test_lbl = pickle.load(open("savedata/test.txt", "rb"))
dev_txt, dev_lbl = pickle.load(open("savedata/dev.txt", "rb"))
"""### **Dataset+ data_loader**"""
class Vocabulary:
"""
Helper class that maps words to unique indices and the other way around
"""
def __init__(self, tokens: List[str]):
# dictionary that maps words to indices
|
def get_token_at_index(self, idx: int):
return self.idx_to_word[idx]
def get_index_of_token(self, token: str):
return self.word_to_idx[token]
def size(self):
return len(self.word_to_idx)
class PropagandaDataset(Dataset):
def __init__(self,
fold: str,
examples: List[str],
labels: List[int],
vocab: Vocabulary):
"""
:type vocab: object
:param fold: 'train'/'eval'/'test'
:param examples: List of sentences/paragraphs
:param labels: List of labels (1 if propaganda, 0 otherwise)
"""
self.fold = fold
self.examples = examples
self.labels = labels
self.vocab = vocab
def __getitem__(self, index: int) -> (torch.Tensor, torch.Tensor):
"""
This function converts an example to a Tensor containing the indices
:param index: position of example to be retrieved.
"""
# retrieve sentence and label (correct class index)
example, label = self.examples[index], self.labels[index]
# tokenize sentence into words and other symbols
tokenizer = get_tokenizer("spacy")
tokens = tokenizer(example)
# convert tokens to their corresponding indices, according to
# vocabulary
token_indices = []
for i in tokens:
token_indices.append(self.vocab.get_index_of_token(i))
return torch.LongTensor(token_indices), torch.LongTensor(label)
def __len__(self):
"""
Return the size of this dataset. This is given by the number
of sentences.
"""
return len(self.examples)
def collate_sentences(batch: List[Tuple]):
"""
This function converts a list of batch_size examples to
a Tensor of size batch_size x max_len
batch: [(example_1_tensor, example_1_label),
...
(example_batch_size_tensor, example_batch_size_label)]
"""
# fill this list with all the labels in the batch
batch_labels = []
# we need to find the maximum length of a sentence in this batch
max_len = 0
for i in batch:
if len(i[0]) > max_len:
max_len = len(i[0])
batch_size = len(batch)
# print('batch size',batch_size)
# initialize a Tensor filled with zeros (aka index of <PAD>)
batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)
# fill each row idx in batch_sentences with the corresponding
# sequence tensor
#
# ... batch_sentences[idx, ...] = ...
for idx in range(0, batch_size):
# print(idx)
# print(len(batch[idx][0]))
# print(len(batch_sentences[idx]))
batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]
print(batch[idx])
batch_labels.append(batch[idx][1])
# print(batch_sentences[idx])
print(type(batch_labels))
# batch_labels = [torch.LongTensor(x) for x in batch_labels]
batch_labels = torch.tensor(batch_labels)
# print(batch_labels)
return batch_sentences, batch_labels
def fill_vocab(txt: List[Tuple]):
tokenizer = get_tokenizer("spacy")
list_v = []
for i in txt:
tok = tokenizer(i)
for j in tok:
if list_v.count(j) == 0:
list_v.append(j)
vocab = Vocabulary(tokens=list_v)
return vocab
full_text = train_txt + dev_txt
vocab = fill_vocab(full_text)
test_vocab = fill_vocab(test_txt)
train_vocab = fill_vocab(train_txt)
dev_vocab = fill_vocab(dev_txt)
pickle.dump(dev_vocab, open("savedata/dev_vocab.txt", "wb"))
pickle.dump(test_vocab, open("savedata/test_vocab.txt", "wb"))
pickle.dump(train_vocab, open("savedata/train_vocab.txt", "wb"))
pickle.dump(vocab, open("savedata/vocab.txt", "wb"))
dev_vocab = pickle.load(open("savedata/dev_vocab.txt","rb"))
test_vocab = pickle.load(open("savedata/test_vocab.txt","rb"))
train_vocab = pickle.load(open("savedata/train_vocab.txt","rb"))
vocab = pickle.load(open("savedata/vocab.txt", "rb"))
dataset_train = PropagandaDataset('train', train_txt, train_lbl, train_vocab)
train_loader = DataLoader(dataset_train, batch_size=16, collate_fn=collate_sentences)
dataset_test = PropagandaDataset('train', test_txt, test_lbl, test_vocab)
test_loader = DataLoader(dataset_test, batch_size=16, collate_fn=collate_sentences)
dataset_dev = PropagandaDataset('train', dev_txt, dev_lbl, dev_vocab)
dev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=collate_sentences)
pickle.dump(train_loader, open("savedata/train_loaded.txt", "wb"))
pickle.dump(test_loader, open("savedata/test_loaded.txt", "wb"))
pickle.dump(dev_loader, open("savedata/dev_loaded.txt", "wb"))
train_loader = pickle.load(open("savedata/train_loaded.txt", "rb"))
test_loader = pickle.load(open("savedata/test_loaded.txt", "rb"))
dev_loader = pickle.load(open("savedata/dev_loaded.txt", "rb"))
"""### model"""
############################## PARAMETERS ######################################
_hyperparameters_dict = {
"batch_size": 64,
"num_epochs": 10, # 10,
"max_len": 250,
"embedding_size": 128, # 256,
"rnn_size": 256, # 1024,
"learning_algo": "adam",
"learning_rate": 0.001,
"max_grad_norm": 5.0
}
class RNN(nn.Module):
def __init__(self, vocab_size: int, char_embedding_size: int,
rnn_size: int):
super().__init__()
self.vocab_size = vocab_size
self.char_embedding_size = char_embedding_size
self.rnn_size = rnn_size
self.dropout = nn.Dropout(p=0.3)
# instantiate Modules with the correct arguments
self.embedding = nn.Embedding(num_embeddings=vocab_size,
embedding_dim=char_embedding_size)
self.rnn = nn.LSTM(input_size=char_embedding_size,
hidden_size=rnn_size, bidirectional=True)
# self.rnn_cell = nn.GRUCell(input_size = char_embedding_size,
# hidden_size = rnn_size)
self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)
# self.softmax = nn.Softmax(dim = 2)
self.loss = nn.CrossEntropyLoss()
def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):
"""
Computes loss for a batch of sequences. The sequence loss is the
average of the individual losses at each timestep. The batch loss is
the average of sequence losses across all batches.
:param logits: unnormalized probabilities for T timesteps, size
batch_size x max_timesteps x vocab_size
:param y: ground truth values (index of correct characters), size
batch_size x max_timesteps
:returns: loss as a scalar
"""
#
# logits: B x T x vocab_size
# B x T
# cross entropy: B x vocab_size x T
# B x T
# vision: B x num_classes
# B
return self.loss(logits, y)
def get_logits(self, hidden_states: torch.FloatTensor,
temperature: float = 1.0):
"""
Computes the unnormalized probabilities from hidden states. Optionally
divide logits by a temperature, in order to influence predictions at
test time (https://www.quora.com/What-is-Temperature-in-LSTM)
:param hidden_states: tensor of size batch_size x timesteps x rnn_size
:param temperature: coefficient that scales outputs before turning them
to probabilities. A low temperature (0.1) results in more conservative
predictions, while a higher temperature (0.9) results in more diverse
predictions
:return: tensor of size batch_size x timesteps x vocab_size
"""
return self.logits(hidden_states) / temperature
def forward(self, batch: torch.LongTensor,
hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:
"""
Computes the hidden states for the current batch (x, y).
:param x: input of size batch_size x max_len
:param hidden_start: hidden state at time step t = 0,
size batch_size x rnn_size
:return: hidden states at all timesteps,
size batch_size x timesteps x rnn_size
"""
# max_len = x.size(1)
# x,label = batch
# batch_size x max_len x embedding_dim
x_embedded = self.embedding(batch)
# x_drop = self.dropout
x_drop = self.dropout(x_embedded)
# compute hidden states and logits for each time step
# hidden_states_list = []
# prev_hidden = hidden_start
hidden_state = self.rnn(x_drop)[0]
# print(hidden_state)
# print(hidden_state[0].shape)
# print(hidden_state[1].shape)
# hidden_state = hidden_state.permute(2,1,0)
# hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])
# hidden_state_maxPooled = hidden_state.permute(2,1,0)
hidden_state_pooled, _ = torch.max(hidden_state, dim=1)
output = self.get_logits(hidden_state_pooled)
# Loss = self.loss(output, y)
# hidden_state = softmax(logits(hidden_state))
# batch_size x max_len x rnn_size
# hidden_states = torch.stack(hidden_states_list, dim=1)
return output
# instantiate the RNNLM module
network = RNN(vocab.size(),
_hyperparameters_dict['embedding_size'],
_hyperparameters_dict['rnn_size'])
# if torch.cuda.is_available():
# device = torch.device('cuda:0')
# else:
# device = torch.device('cpu')
# move network to GPU if available
# network = network.to(device)
# device = torch.device('cpu')
# network = network.to(device)
optimizer = Adam(params=network.parameters(), lr=0.001)
# CHECKPOINT: make sure you understand each parameter size
print("Neural network parameters: ")
for param_name, param in network.named_parameters():
print("\t" + param_name, " size: ", param.size())
"""# Training/evaluation loop"""
# Commented out IPython magic to ensure Python compatibility.
class Trainer:
def __init__(self, model: nn.Module,
train_data: torch.LongTensor,
dev_data: torch.LongTensor,
vocab: Vocabulary,
hyperparams: Dict):
self.model = model
self.train_data = train_data
self.dev_data = dev_data
self.vocab = vocab
# self.device = torch.device('cuda:0')
if hyperparams['learning_algo'] == 'adam':
self.optimizer = Adam(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
else:
self.optimizer = SGD(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
self.num_epochs = hyperparams['num_epochs']
self.max_len = hyperparams['max_len']
self.batch_size = hyperparams['batch_size']
self.rnn_size = hyperparams['rnn_size']
self.max_grad_norm = hyperparams['max_grad_norm']
# number of characters in training/dev data
self.train_size = len(train_data)
self.dev_size = len(dev_data)
# number of sequences (X, Y) used for training
self.num_train_examples = \
self.train_size // (self.batch_size * self.max_len) * self.batch_size
def train_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the training set
:param epoch_num: number of current epoch
"""
self.model.train()
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size)
# for batch_num, (x, y) in enumerate(make_batches(self.train_data,
# self.batch_size,
# self.max_len)):
for batch_num, batch_tuple in enumerate(self.train_data):
print('batch: ', batch_num)
# reset gradients in train epoch
self.optimizer.zero_grad()
x = len(batch_tuple[0])
y = len(batch_tuple[0][0])
# compute hidden states
# batch x timesteps x hidden_size
x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# compute unnormalized probabilities
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
# compute loss
# scalar
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
# backpropagation (gradient of loss wrt parameters)
batch_loss.backward()
# clip gradients if they get too large
torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),
self.max_grad_norm)
# update parameters
self.optimizer.step()
# we use a stateful RNN, which means the first hidden state for the
# next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:] # add comment
if batch_num % 100 == 0:
print("epoch %d, %d/%d examples, batch loss = %f"
% (epoch_num, (batch_num + 1) * self.batch_size,
self.num_train_examples, batch_loss.item()))
epoch_loss /= (batch_num + 1)
return epoch_loss
def eval_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the validation set
:param epoch_num: number of current epoch
"""
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size).to(device)
with torch.no_grad():
# for batch_num, (x, y) in enumerate(make_batches(self.dev_data,
# self.batch_size,
# self.max_len)):
acc = 0;
for batch_num, batch_tuple in enumerate(self.train_data):
print('batch: ', batch_num)
# reset gradients
# self.optimizer.zero_grad()
# x = len(batch_tuple[0])
# y = len(batch_tuple[0][0])
# batch x timesteps x hidden_size
x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
hidden_states_m = torch.argmax(hidden_states, dim=1)
acc += sum(hidden_states_m == y).item()
# we use a stateful RNN, which means the first hidden state for
# the next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:]
epoch_loss /= (batch_num + 1)
return epoch_loss, acc
def train(self) -> Dict:
train_losses, dev_losses, dev_acc = [], [], []
for epoch in range(self.num_epochs):
epoch_train_loss = self.train_epoch(epoch)
epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)
train_losses.append(epoch_train_loss)
dev_losses.append(epoch_dev_loss)
dev_acc.append(epoch_dev_train)
return {"train_losses": train_losses,
"dev_losses": dev_losses,
"dev_acc": epoch_dev_train}
def plot_losses(metrics: Dict):
"""
Plots training/validation losses.
:param metrics: dictionar
"""
plt.figure()
plt.plot(metrics['train_losses'], c='b', label='Train')
plt.plot(metrics['dev_losses'], c='g', label='Valid')
plt.ylabel('Loss')
plt.xlabel('Iteration')
plt.legend()
plt.show()
# op= torch.rand(4)
# thx = torch.rand(4)
# thx[0] = op[0]
# t = thx==op
# print(t)
# print(sum(t).item())
# train network for some epoch
trainer = Trainer(network, train_loader, dev_loader, vocab, _hyperparameters_dict)
metrics = trainer.train()
# plot training and validations losses each epoch
plot_losses(metrics)
# for i in train_loader:
# print(len(i[0][0]))
# print(len(i[0]))
# print(i[0])
# x = 1
# while (True)
# x = 0
| self.word_to_idx = {'<PAD>': 0}
for idx, tok in enumerate(tokens, 1):
self.word_to_idx[tok] = idx
# dictionary that maps indices to words
self.idx_to_word = {}
for tok, idx in self.word_to_idx.items():
self.idx_to_word[idx] = tok | identifier_body |
deduction_engine_extended.py | """
This module contains the rule-based inference (rulebased_deduction engine)
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
class Prediction:
"""
An object to represent the prediction of the rules
:ivar triple: the predicted triple
:ivar all_sources: all rules that predicted the same triple
"""
# def __init__(self, triple: tuple, source_description=Description(), all_sources=None):
def __init__(self, triple=None, sources=None):
self.triple = triple
# self.source_description = source_descriptionf
self.all_sources = sources if sources else list() # sources if sources else {source_description}
def get_subject(self):
return self.triple[0]
def get_object(self):
return self.triple[2]
def get_quality(self, measure='x_coverage', method=max):
# return self.source_description.get_quality(measure)
return method([source.get_quality(measure) for source in self.all_sources])
def get_main_description(self, measure='x_coverage', method=max):
return method(self.all_sources, key=lambda d: d.get_quality(measure))
def __str__(self):
return str(self.triple) + '<<' + str(self.get_main_description())
def __repr__(self):
return "%s\t(\t%s,%s)" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))
def __eq__(self, other):
return other.triple == self.triple
def __hash__(self):
return hash(self.triple)
class DeductionEngine():
"""
Abstract rulebased_deduction/inference engine.
"""
def __init__(self, **kwargs):
pass
def infer(self, descriptions, recursive=False, topk=-1):
pass
class SparqlBasedDeductionEngineExtended(DeductionEngine):
"""
Deduction engine that converts the rules to sparql and fire them over the KG.
The rule-based_deduction takes care of consolidating similar predictions
"""
def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):
"""
:param kg_query_interface: interface for the KG.
:param relation: the relation used in the predicted triple (optional)
:param quality: objective quality measure for ranking the predictions (optional) by default
the exclusive coverage of the rules is used
:param quality_aggregation: the methd used for aggregating the score if multiple rules infers the same fact
(optional) by default max is used.
"""
super(SparqlBasedDeductionEngineExtended, self).__init__()
self.relation = relation
self.query_executer = kg_query_interface
self.quality = quality
self.quality_aggregation = quality_aggregation
self.labels_indexer=Indexer(store=kg_query_interface.type,
endpoint=kg_query_interface.endpoint,
graph= kg_query_interface.labels_graph,
identifier=kg_query_interface.labels_identifier)
def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,
clear_target_entities=True):
"""
Infer new facts for a giving set of descriptions
:param descriptions_list: list of explantions/descriptions rules
:param target_entities: entities and their labels for which predictions are generated
:param min_quality: minimum aggregated quality for the predictions
:param topk: k *distinct* highest quality predictions per entity,
:param output_filepath: predictions output file.
:param clear_target_entities: clear indexed target entities after done inference
:return: dictionary of predicted entity-clusters assignments
"""
if isinstance(descriptions_list,dict):
descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))
if target_entities:
self.labels_indexer.index_triples(target_entities)
self.relation=target_entities.get_relation()
predictions = list(map(self._infer_single, descriptions_list))
per_entity_predictions = self.consolidate(predictions)
per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)
if output_filepath:
dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,
with_description=False, quality=self.quality)
if target_entities and clear_target_entities:
self.labels_indexer.drop()
return per_entity_predictions
def consolidate(self, predictions):
|
def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):
"""
Merge the the inferred facts in case of functional predicates
:param per_entity_prediction:
:return:
"""
def quality_method(p):
return p.get_quality(self.quality, self.quality_aggregation)
per_entity_prediction_filtered = defaultdict(list)
for sub, per_obj_predictions in per_entity_prediction.items():
# print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])
merged_predictions = list(
filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))
merged_predictions.sort(key=quality_method, reverse=True)
include = topk if topk > 0 else len(merged_predictions)
per_entity_prediction_filtered[sub] = merged_predictions[:include]
return per_entity_prediction_filtered
def _infer_single(self, description: Description2):
"""
Infer new facts for the given Description
:param description:
:return:
"""
bindings = self.query_executer.get_arguments_bindings(description,
restriction_pattern=Description2(body=[Atom('?x',
self.relation,
'?z')]))
head = description.head
# only supports p(?x,CONSTANT)
predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]
return predictions
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
Writes the predictions to two files, the first is human readable and the other with .parsable extension that can be
parsed in python.
:param per_var_predictions:
:param out_filepath:
:param triple_format:
:param topk:
:param with_weight:
:param with_description:
:return:
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
'\t%s' % p.source_description if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions))
| """
Combine predictions from different rules
:param predictions: list of generated predictions
:return: combined single prediction with several sources for equivalent predictions
:rtype: dict
"""
# per_var_predictions = defaultdict(lambda: defaultdict(list))
# for p in chain.from_iterable(predictions):
# per_var_predictions[p.get_subject()][p.get_object()].append(p)
per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))
for p in list(chain.from_iterable(predictions)):
cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]
cons_pred.triple = p.triple
cons_pred.all_sources += p.all_sources
return per_entity_predictions | identifier_body |
deduction_engine_extended.py | """
This module contains the rule-based inference (rulebased_deduction engine)
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
class Prediction:
"""
An object to represent the prediction of the rules
:ivar triple: the predicted triple
:ivar all_sources: all rules that predicted the same triple
"""
# def __init__(self, triple: tuple, source_description=Description(), all_sources=None):
def __init__(self, triple=None, sources=None):
self.triple = triple
# self.source_description = source_descriptionf
self.all_sources = sources if sources else list() # sources if sources else {source_description}
def get_subject(self):
return self.triple[0]
def get_object(self):
return self.triple[2]
def get_quality(self, measure='x_coverage', method=max):
# return self.source_description.get_quality(measure)
return method([source.get_quality(measure) for source in self.all_sources])
def get_main_description(self, measure='x_coverage', method=max):
return method(self.all_sources, key=lambda d: d.get_quality(measure))
def __str__(self):
return str(self.triple) + '<<' + str(self.get_main_description())
def __repr__(self):
return "%s\t(\t%s,%s)" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))
def | (self, other):
return other.triple == self.triple
def __hash__(self):
return hash(self.triple)
class DeductionEngine():
"""
Abstract rulebased_deduction/inference engine.
"""
def __init__(self, **kwargs):
pass
def infer(self, descriptions, recursive=False, topk=-1):
pass
class SparqlBasedDeductionEngineExtended(DeductionEngine):
"""
Deduction engine that converts the rules to sparql and fire them over the KG.
The rule-based_deduction takes care of consolidating similar predictions
"""
def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):
"""
:param kg_query_interface: interface for the KG.
:param relation: the relation used in the predicted triple (optional)
:param quality: objective quality measure for ranking the predictions (optional) by default
the exclusive coverage of the rules is used
:param quality_aggregation: the methd used for aggregating the score if multiple rules infers the same fact
(optional) by default max is used.
"""
super(SparqlBasedDeductionEngineExtended, self).__init__()
self.relation = relation
self.query_executer = kg_query_interface
self.quality = quality
self.quality_aggregation = quality_aggregation
self.labels_indexer=Indexer(store=kg_query_interface.type,
endpoint=kg_query_interface.endpoint,
graph= kg_query_interface.labels_graph,
identifier=kg_query_interface.labels_identifier)
def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,
clear_target_entities=True):
"""
Infer new facts for a giving set of descriptions
:param descriptions_list: list of explantions/descriptions rules
:param target_entities: entities and their labels for which predictions are generated
:param min_quality: minimum aggregated quality for the predictions
:param topk: k *distinct* highest quality predictions per entity,
:param output_filepath: predictions output file.
:param clear_target_entities: clear indexed target entities after done inference
:return: dictionary of predicted entity-clusters assignments
"""
if isinstance(descriptions_list,dict):
descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))
if target_entities:
self.labels_indexer.index_triples(target_entities)
self.relation=target_entities.get_relation()
predictions = list(map(self._infer_single, descriptions_list))
per_entity_predictions = self.consolidate(predictions)
per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)
if output_filepath:
dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,
with_description=False, quality=self.quality)
if target_entities and clear_target_entities:
self.labels_indexer.drop()
return per_entity_predictions
def consolidate(self, predictions):
"""
Combine predictions from different rules
:param predictions: list of generated predictions
:return: combined single prediction with several sources for equivalent predictions
:rtype: dict
"""
# per_var_predictions = defaultdict(lambda: defaultdict(list))
# for p in chain.from_iterable(predictions):
# per_var_predictions[p.get_subject()][p.get_object()].append(p)
per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))
for p in list(chain.from_iterable(predictions)):
cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]
cons_pred.triple = p.triple
cons_pred.all_sources += p.all_sources
return per_entity_predictions
def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):
"""
Merge the the inferred facts in case of functional predicates
:param per_entity_prediction:
:return:
"""
def quality_method(p):
return p.get_quality(self.quality, self.quality_aggregation)
per_entity_prediction_filtered = defaultdict(list)
for sub, per_obj_predictions in per_entity_prediction.items():
# print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])
merged_predictions = list(
filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))
merged_predictions.sort(key=quality_method, reverse=True)
include = topk if topk > 0 else len(merged_predictions)
per_entity_prediction_filtered[sub] = merged_predictions[:include]
return per_entity_prediction_filtered
def _infer_single(self, description: Description2):
"""
Infer new facts for the given Description
:param description:
:return:
"""
bindings = self.query_executer.get_arguments_bindings(description,
restriction_pattern=Description2(body=[Atom('?x',
self.relation,
'?z')]))
head = description.head
# only supports p(?x,CONSTANT)
predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]
return predictions
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
Writes the predictions to two files, the first is human readable and the other with .parsable extension that can be
parsed in python.
:param per_var_predictions:
:param out_filepath:
:param triple_format:
:param topk:
:param with_weight:
:param with_description:
:return:
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
'\t%s' % p.source_description if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions))
| __eq__ | identifier_name |
deduction_engine_extended.py | """
This module contains the rule-based inference (rulebased_deduction engine)
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
class Prediction:
"""
An object to represent the prediction of the rules
:ivar triple: the predicted triple
:ivar all_sources: all rules that predicted the same triple
"""
# def __init__(self, triple: tuple, source_description=Description(), all_sources=None):
def __init__(self, triple=None, sources=None):
self.triple = triple
# self.source_description = source_descriptionf
self.all_sources = sources if sources else list() # sources if sources else {source_description}
def get_subject(self):
return self.triple[0]
def get_object(self):
return self.triple[2]
def get_quality(self, measure='x_coverage', method=max):
# return self.source_description.get_quality(measure)
return method([source.get_quality(measure) for source in self.all_sources])
def get_main_description(self, measure='x_coverage', method=max):
return method(self.all_sources, key=lambda d: d.get_quality(measure))
def __str__(self):
return str(self.triple) + '<<' + str(self.get_main_description())
def __repr__(self):
return "%s\t(\t%s,%s)" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))
def __eq__(self, other):
return other.triple == self.triple
def __hash__(self):
return hash(self.triple)
class DeductionEngine():
"""
Abstract rulebased_deduction/inference engine.
"""
def __init__(self, **kwargs):
pass
def infer(self, descriptions, recursive=False, topk=-1):
pass
class SparqlBasedDeductionEngineExtended(DeductionEngine):
"""
Deduction engine that converts the rules to sparql and fire them over the KG.
The rule-based_deduction takes care of consolidating similar predictions
"""
def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):
"""
:param kg_query_interface: interface for the KG.
:param relation: the relation used in the predicted triple (optional)
:param quality: objective quality measure for ranking the predictions (optional) by default
the exclusive coverage of the rules is used
:param quality_aggregation: the methd used for aggregating the score if multiple rules infers the same fact
(optional) by default max is used.
"""
super(SparqlBasedDeductionEngineExtended, self).__init__()
self.relation = relation
self.query_executer = kg_query_interface
self.quality = quality
self.quality_aggregation = quality_aggregation
self.labels_indexer=Indexer(store=kg_query_interface.type,
endpoint=kg_query_interface.endpoint,
graph= kg_query_interface.labels_graph,
identifier=kg_query_interface.labels_identifier)
def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,
clear_target_entities=True):
"""
Infer new facts for a giving set of descriptions
:param descriptions_list: list of explantions/descriptions rules
:param target_entities: entities and their labels for which predictions are generated
:param min_quality: minimum aggregated quality for the predictions
:param topk: k *distinct* highest quality predictions per entity,
:param output_filepath: predictions output file.
:param clear_target_entities: clear indexed target entities after done inference
:return: dictionary of predicted entity-clusters assignments
"""
if isinstance(descriptions_list,dict):
descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))
if target_entities:
self.labels_indexer.index_triples(target_entities)
self.relation=target_entities.get_relation()
predictions = list(map(self._infer_single, descriptions_list))
per_entity_predictions = self.consolidate(predictions)
per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)
if output_filepath:
|
if target_entities and clear_target_entities:
self.labels_indexer.drop()
return per_entity_predictions
def consolidate(self, predictions):
"""
Combine predictions from different rules
:param predictions: list of generated predictions
:return: combined single prediction with several sources for equivalent predictions
:rtype: dict
"""
# per_var_predictions = defaultdict(lambda: defaultdict(list))
# for p in chain.from_iterable(predictions):
# per_var_predictions[p.get_subject()][p.get_object()].append(p)
per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))
for p in list(chain.from_iterable(predictions)):
cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]
cons_pred.triple = p.triple
cons_pred.all_sources += p.all_sources
return per_entity_predictions
def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):
"""
Merge the the inferred facts in case of functional predicates
:param per_entity_prediction:
:return:
"""
def quality_method(p):
return p.get_quality(self.quality, self.quality_aggregation)
per_entity_prediction_filtered = defaultdict(list)
for sub, per_obj_predictions in per_entity_prediction.items():
# print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])
merged_predictions = list(
filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))
merged_predictions.sort(key=quality_method, reverse=True)
include = topk if topk > 0 else len(merged_predictions)
per_entity_prediction_filtered[sub] = merged_predictions[:include]
return per_entity_prediction_filtered
def _infer_single(self, description: Description2):
"""
Infer new facts for the given Description
:param description:
:return:
"""
bindings = self.query_executer.get_arguments_bindings(description,
restriction_pattern=Description2(body=[Atom('?x',
self.relation,
'?z')]))
head = description.head
# only supports p(?x,CONSTANT)
predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]
return predictions
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
Writes the predictions to two files, the first is human readable and the other with .parsable extension that can be
parsed in python.
:param per_var_predictions:
:param out_filepath:
:param triple_format:
:param topk:
:param with_weight:
:param with_description:
:return:
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
'\t%s' % p.source_description if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions))
| dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,
with_description=False, quality=self.quality) | conditional_block |
deduction_engine_extended.py | """
This module contains the rule-based inference (rulebased_deduction engine)
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
class Prediction:
"""
An object to represent the prediction of the rules
:ivar triple: the predicted triple
:ivar all_sources: all rules that predicted the same triple
"""
# def __init__(self, triple: tuple, source_description=Description(), all_sources=None):
def __init__(self, triple=None, sources=None):
self.triple = triple
# self.source_description = source_descriptionf
self.all_sources = sources if sources else list() # sources if sources else {source_description}
def get_subject(self):
return self.triple[0]
def get_object(self):
return self.triple[2]
def get_quality(self, measure='x_coverage', method=max):
# return self.source_description.get_quality(measure)
return method([source.get_quality(measure) for source in self.all_sources])
def get_main_description(self, measure='x_coverage', method=max):
return method(self.all_sources, key=lambda d: d.get_quality(measure))
def __str__(self):
return str(self.triple) + '<<' + str(self.get_main_description())
def __repr__(self):
return "%s\t(\t%s,%s)" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))
def __eq__(self, other):
return other.triple == self.triple
def __hash__(self):
return hash(self.triple)
class DeductionEngine():
"""
Abstract rulebased_deduction/inference engine.
"""
def __init__(self, **kwargs):
pass
def infer(self, descriptions, recursive=False, topk=-1):
pass
class SparqlBasedDeductionEngineExtended(DeductionEngine):
"""
Deduction engine that converts the rules to sparql and fire them over the KG.
The rule-based_deduction takes care of consolidating similar predictions
"""
def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):
"""
:param kg_query_interface: interface for the KG.
:param relation: the relation used in the predicted triple (optional)
:param quality: objective quality measure for ranking the predictions (optional) by default
the exclusive coverage of the rules is used
:param quality_aggregation: the methd used for aggregating the score if multiple rules infers the same fact
(optional) by default max is used.
"""
super(SparqlBasedDeductionEngineExtended, self).__init__()
self.relation = relation
self.query_executer = kg_query_interface
self.quality = quality
self.quality_aggregation = quality_aggregation
self.labels_indexer=Indexer(store=kg_query_interface.type,
endpoint=kg_query_interface.endpoint,
graph= kg_query_interface.labels_graph,
identifier=kg_query_interface.labels_identifier)
def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,
clear_target_entities=True):
"""
Infer new facts for a giving set of descriptions
:param descriptions_list: list of explantions/descriptions rules
:param target_entities: entities and their labels for which predictions are generated
:param min_quality: minimum aggregated quality for the predictions
:param topk: k *distinct* highest quality predictions per entity,
:param output_filepath: predictions output file.
:param clear_target_entities: clear indexed target entities after done inference
:return: dictionary of predicted entity-clusters assignments
"""
if isinstance(descriptions_list,dict):
descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))
if target_entities:
self.labels_indexer.index_triples(target_entities)
self.relation=target_entities.get_relation()
predictions = list(map(self._infer_single, descriptions_list))
per_entity_predictions = self.consolidate(predictions)
per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)
if output_filepath:
dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,
with_description=False, quality=self.quality)
if target_entities and clear_target_entities:
self.labels_indexer.drop()
return per_entity_predictions
def consolidate(self, predictions):
"""
Combine predictions from different rules
:param predictions: list of generated predictions
:return: combined single prediction with several sources for equivalent predictions
:rtype: dict
"""
# per_var_predictions = defaultdict(lambda: defaultdict(list))
# for p in chain.from_iterable(predictions):
# per_var_predictions[p.get_subject()][p.get_object()].append(p)
per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))
for p in list(chain.from_iterable(predictions)):
cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]
cons_pred.triple = p.triple
cons_pred.all_sources += p.all_sources
return per_entity_predictions
def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):
"""
Merge the the inferred facts in case of functional predicates
:param per_entity_prediction:
:return:
"""
def quality_method(p):
return p.get_quality(self.quality, self.quality_aggregation)
per_entity_prediction_filtered = defaultdict(list)
for sub, per_obj_predictions in per_entity_prediction.items():
# print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])
merged_predictions = list(
filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))
merged_predictions.sort(key=quality_method, reverse=True)
include = topk if topk > 0 else len(merged_predictions)
per_entity_prediction_filtered[sub] = merged_predictions[:include]
return per_entity_prediction_filtered
def _infer_single(self, description: Description2):
"""
Infer new facts for the given Description
:param description:
:return:
"""
bindings = self.query_executer.get_arguments_bindings(description,
restriction_pattern=Description2(body=[Atom('?x',
self.relation,
'?z')]))
head = description.head
# only supports p(?x,CONSTANT)
predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]
return predictions
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
Writes the predictions to two files, the first is human readable and the other with .parsable extension that can be
parsed in python.
:param per_var_predictions:
:param out_filepath:
:param triple_format:
:param topk:
:param with_weight: | :param with_description:
:return:
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
'\t%s' % p.source_description if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions)) | random_line_split | |
system.rs | use crate::simulation::agent_shader::ty::PushConstantData;
use crate::simulation::blur_fade_shader;
use crate::simulation::Simulation;
use imgui::{Context, Ui};
use imgui_vulkano_renderer::Renderer;
use imgui_winit_support::{HiDpiMode, WinitPlatform};
use std::sync::Arc;
use std::time::{Duration, Instant};
use vulkano::command_buffer::AutoCommandBufferBuilder;
use vulkano::device::{Device, DeviceExtensions, Queue};
use vulkano::image::{ImageUsage, SwapchainImage};
use vulkano::instance::{Instance, PhysicalDevice};
use vulkano::swapchain;
use vulkano::swapchain::{
AcquireError, ColorSpace, FullscreenExclusive, PresentMode, Surface, SurfaceTransform,
Swapchain, SwapchainCreationError,
};
use vulkano::sync;
use vulkano::sync::{FlushError, GpuFuture};
use vulkano_win::VkSurfaceBuild;
use winit::event::{Event, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::{Window, WindowBuilder};
pub struct System {
pub event_loop: EventLoop<()>,
pub device: Arc<Device>,
pub queue: Arc<Queue>,
pub surface: Arc<Surface<Window>>,
pub swapchain: Arc<Swapchain<Window>>,
pub images: Vec<Arc<SwapchainImage<Window>>>,
pub imgui: Context,
pub platform: WinitPlatform,
pub renderer: Renderer,
}
impl System {
pub fn init(window_title: &str) -> System {
// Basic commands taken from the vulkano imgui examples:
// https://github.com/Tenebryo/imgui-vulkano-renderer/blob/master/examples/support/mod.rs
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None).expect("Failed to create instance.")
};
let physical = PhysicalDevice::enumerate(&instance)
.next()
.expect("No device available");
let event_loop = EventLoop::new();
let surface = WindowBuilder::new()
.with_title(window_title.to_owned())
.with_inner_size(winit::dpi::PhysicalSize {
width: 2000,
height: 1400,
})
.build_vk_surface(&event_loop, instance.clone())
.unwrap();
let queue_family = physical
.queue_families()
.find(|&q|
q.supports_graphics() && q.explicitly_supports_transfers()
&& surface.is_supported(q).unwrap_or(false)
)
.expect("Device does not have a queue family that can draw to the window and supports transfers.");
let (device, mut queues) = {
let device_ext = DeviceExtensions {
khr_swapchain: true,
// Needed for compute shaders.
khr_storage_buffer_storage_class: true,
..DeviceExtensions::none()
};
Device::new(
physical,
physical.supported_features(),
&device_ext,
[(queue_family, 0.5)].iter().cloned(),
)
.expect("Failed to create device")
};
let queue = queues.next().unwrap();
let format;
let (swapchain, images) = {
let caps = surface
.capabilities(physical)
.expect("Failed to get capabilities.");
format = caps.supported_formats[0].0;
let dimensions = caps.current_extent.unwrap_or([1280, 1024]);
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let image_usage = ImageUsage {
transfer_destination: true,
..ImageUsage::color_attachment()
};
Swapchain::new(
device.clone(),
surface.clone(),
caps.min_image_count,
format,
dimensions,
1,
image_usage,
&queue,
SurfaceTransform::Identity,
alpha,
PresentMode::Fifo,
FullscreenExclusive::Default,
true,
ColorSpace::SrgbNonLinear,
)
.expect("Failed to create swapchain")
};
let mut imgui = Context::create();
imgui.set_ini_filename(None);
let mut platform = WinitPlatform::init(&mut imgui);
platform.attach_window(imgui.io_mut(), &surface.window(), HiDpiMode::Rounded);
let renderer = Renderer::init(&mut imgui, device.clone(), queue.clone(), format)
.expect("Failed to initialize renderer");
System {
event_loop,
device,
queue,
surface,
swapchain,
images,
imgui,
platform,
renderer,
}
}
pub fn main_loop<
F: FnMut(
&mut bool,
&mut PushConstantData,
&mut blur_fade_shader::ty::PushConstantData,
&mut Ui,
) + 'static,
>(
self,
simulation: Simulation,
mut run_ui: F,
) {
let System {
event_loop,
device,
queue,
surface,
mut swapchain,
mut images,
mut imgui,
mut platform,
mut renderer,
..
} = self;
// Apparently there are various reasons why we might need to re-create the swapchain.
// For example when the target surface has changed size.
// This keeps track of whether the previous frame encountered one of those reasons.
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let mut last_redraw = Instant::now();
let mut sim_parameters: PushConstantData = PushConstantData {
// Pixels per second.
agent_speed: 100.0,
// Radians per second.
agent_turn_speed: 50.0, | sensor_radius: 1,
// In the range [0 - PI]
sensor_angle_spacing: 0.18,
// Seconds per frame. (60fps)
delta_time: 0.016667,
};
let mut fade_parameters: blur_fade_shader::ty::PushConstantData =
blur_fade_shader::ty::PushConstantData {
// Seconds per frame. (60fps)
delta_time: 0.016667,
evaporate_speed: 0.9,
};
// target 60 fps
let target_frame_time = Duration::from_millis(1000 / 60);
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::MainEventsCleared => {
platform
.prepare_frame(imgui.io_mut(), &surface.window())
.expect("Failed to prepare frame.");
surface.window().request_redraw();
}
Event::RedrawRequested(_) => {
// ---- Stick to the framerate ----
let t = Instant::now();
let since_last = t.duration_since(last_redraw);
last_redraw = t;
if since_last < target_frame_time {
std::thread::sleep(target_frame_time - since_last);
}
// ---- Cleanup ----
previous_frame_end.as_mut().unwrap().cleanup_finished();
// ---- Recreate swapchain if necessary ----
if recreate_swapchain {
let dimensions: [u32; 2] = surface.window().inner_size().into();
let (new_swapchain, new_images) =
match swapchain.recreate_with_dimensions(dimensions) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => return,
Err(e) => panic!("Failed to recreate swapchain: {:?}", e),
};
images = new_images;
swapchain = new_swapchain;
recreate_swapchain = false;
}
// ---- Run the user's imgui code ----
let mut ui = imgui.frame();
let mut run = true;
run_ui(&mut run, &mut sim_parameters, &mut fade_parameters, &mut ui);
if !run {
*control_flow = ControlFlow::Exit;
}
// ---- Create draw commands ----
let (image_num, suboptimal, acquire_future) =
match swapchain::acquire_next_image(swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {:?}", e),
};
if suboptimal {
recreate_swapchain = true;
}
platform.prepare_render(&ui, surface.window());
let draw_data = ui.render();
let extent_x = simulation
.result_image
.dimensions()
.width()
.min(images[image_num].dimensions()[0]);
let extent_y = simulation
.result_image
.dimensions()
.height()
.min(images[image_num].dimensions()[1]);
let mut cmd_buf_builder =
AutoCommandBufferBuilder::new(device.clone(), queue.family())
.expect("Failed to create command buffer");
cmd_buf_builder
.clear_color_image(images[image_num].clone(), [0.0; 4].into())
.unwrap();
cmd_buf_builder
.copy_image(
simulation.result_image.clone(),
[0; 3],
0,
0,
images[image_num].clone(),
[0; 3],
0,
0,
[extent_x, extent_y, 1],
1,
)
.expect("Failed to create image copy command");
renderer
.draw_commands(
&mut cmd_buf_builder,
queue.clone(),
images[image_num].clone(),
draw_data,
)
.expect("Rendering failed");
let cmd_buf = cmd_buf_builder
.build()
.expect("Failed to build command buffer");
// ---- Execute the draw commands ----
let (buffer_1, buffer_2, buffer_3) =
simulation.create_command_buffers(&sim_parameters, &fade_parameters);
let future = previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(queue.clone(), buffer_1)
.unwrap()
.then_execute(queue.clone(), buffer_2)
.unwrap()
.then_execute(queue.clone(), buffer_3)
.unwrap()
.then_execute(queue.clone(), cmd_buf)
.unwrap()
.then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Some(future.boxed());
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {:?}", e);
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
event => {
// Pass events on to imgui.
platform.handle_event(imgui.io_mut(), surface.window(), &event);
}
}
});
}
} | random_line_split | |
system.rs | use crate::simulation::agent_shader::ty::PushConstantData;
use crate::simulation::blur_fade_shader;
use crate::simulation::Simulation;
use imgui::{Context, Ui};
use imgui_vulkano_renderer::Renderer;
use imgui_winit_support::{HiDpiMode, WinitPlatform};
use std::sync::Arc;
use std::time::{Duration, Instant};
use vulkano::command_buffer::AutoCommandBufferBuilder;
use vulkano::device::{Device, DeviceExtensions, Queue};
use vulkano::image::{ImageUsage, SwapchainImage};
use vulkano::instance::{Instance, PhysicalDevice};
use vulkano::swapchain;
use vulkano::swapchain::{
AcquireError, ColorSpace, FullscreenExclusive, PresentMode, Surface, SurfaceTransform,
Swapchain, SwapchainCreationError,
};
use vulkano::sync;
use vulkano::sync::{FlushError, GpuFuture};
use vulkano_win::VkSurfaceBuild;
use winit::event::{Event, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::{Window, WindowBuilder};
pub struct System {
pub event_loop: EventLoop<()>,
pub device: Arc<Device>,
pub queue: Arc<Queue>,
pub surface: Arc<Surface<Window>>,
pub swapchain: Arc<Swapchain<Window>>,
pub images: Vec<Arc<SwapchainImage<Window>>>,
pub imgui: Context,
pub platform: WinitPlatform,
pub renderer: Renderer,
}
impl System {
pub fn init(window_title: &str) -> System |
pub fn main_loop<
F: FnMut(
&mut bool,
&mut PushConstantData,
&mut blur_fade_shader::ty::PushConstantData,
&mut Ui,
) + 'static,
>(
self,
simulation: Simulation,
mut run_ui: F,
) {
let System {
event_loop,
device,
queue,
surface,
mut swapchain,
mut images,
mut imgui,
mut platform,
mut renderer,
..
} = self;
// Apparently there are various reasons why we might need to re-create the swapchain.
// For example when the target surface has changed size.
// This keeps track of whether the previous frame encountered one of those reasons.
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let mut last_redraw = Instant::now();
let mut sim_parameters: PushConstantData = PushConstantData {
// Pixels per second.
agent_speed: 100.0,
// Radians per second.
agent_turn_speed: 50.0,
sensor_radius: 1,
// In the range [0 - PI]
sensor_angle_spacing: 0.18,
// Seconds per frame. (60fps)
delta_time: 0.016667,
};
let mut fade_parameters: blur_fade_shader::ty::PushConstantData =
blur_fade_shader::ty::PushConstantData {
// Seconds per frame. (60fps)
delta_time: 0.016667,
evaporate_speed: 0.9,
};
// target 60 fps
let target_frame_time = Duration::from_millis(1000 / 60);
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::MainEventsCleared => {
platform
.prepare_frame(imgui.io_mut(), &surface.window())
.expect("Failed to prepare frame.");
surface.window().request_redraw();
}
Event::RedrawRequested(_) => {
// ---- Stick to the framerate ----
let t = Instant::now();
let since_last = t.duration_since(last_redraw);
last_redraw = t;
if since_last < target_frame_time {
std::thread::sleep(target_frame_time - since_last);
}
// ---- Cleanup ----
previous_frame_end.as_mut().unwrap().cleanup_finished();
// ---- Recreate swapchain if necessary ----
if recreate_swapchain {
let dimensions: [u32; 2] = surface.window().inner_size().into();
let (new_swapchain, new_images) =
match swapchain.recreate_with_dimensions(dimensions) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => return,
Err(e) => panic!("Failed to recreate swapchain: {:?}", e),
};
images = new_images;
swapchain = new_swapchain;
recreate_swapchain = false;
}
// ---- Run the user's imgui code ----
let mut ui = imgui.frame();
let mut run = true;
run_ui(&mut run, &mut sim_parameters, &mut fade_parameters, &mut ui);
if !run {
*control_flow = ControlFlow::Exit;
}
// ---- Create draw commands ----
let (image_num, suboptimal, acquire_future) =
match swapchain::acquire_next_image(swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {:?}", e),
};
if suboptimal {
recreate_swapchain = true;
}
platform.prepare_render(&ui, surface.window());
let draw_data = ui.render();
let extent_x = simulation
.result_image
.dimensions()
.width()
.min(images[image_num].dimensions()[0]);
let extent_y = simulation
.result_image
.dimensions()
.height()
.min(images[image_num].dimensions()[1]);
let mut cmd_buf_builder =
AutoCommandBufferBuilder::new(device.clone(), queue.family())
.expect("Failed to create command buffer");
cmd_buf_builder
.clear_color_image(images[image_num].clone(), [0.0; 4].into())
.unwrap();
cmd_buf_builder
.copy_image(
simulation.result_image.clone(),
[0; 3],
0,
0,
images[image_num].clone(),
[0; 3],
0,
0,
[extent_x, extent_y, 1],
1,
)
.expect("Failed to create image copy command");
renderer
.draw_commands(
&mut cmd_buf_builder,
queue.clone(),
images[image_num].clone(),
draw_data,
)
.expect("Rendering failed");
let cmd_buf = cmd_buf_builder
.build()
.expect("Failed to build command buffer");
// ---- Execute the draw commands ----
let (buffer_1, buffer_2, buffer_3) =
simulation.create_command_buffers(&sim_parameters, &fade_parameters);
let future = previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(queue.clone(), buffer_1)
.unwrap()
.then_execute(queue.clone(), buffer_2)
.unwrap()
.then_execute(queue.clone(), buffer_3)
.unwrap()
.then_execute(queue.clone(), cmd_buf)
.unwrap()
.then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Some(future.boxed());
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {:?}", e);
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
event => {
// Pass events on to imgui.
platform.handle_event(imgui.io_mut(), surface.window(), &event);
}
}
});
}
}
| {
// Basic commands taken from the vulkano imgui examples:
// https://github.com/Tenebryo/imgui-vulkano-renderer/blob/master/examples/support/mod.rs
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None).expect("Failed to create instance.")
};
let physical = PhysicalDevice::enumerate(&instance)
.next()
.expect("No device available");
let event_loop = EventLoop::new();
let surface = WindowBuilder::new()
.with_title(window_title.to_owned())
.with_inner_size(winit::dpi::PhysicalSize {
width: 2000,
height: 1400,
})
.build_vk_surface(&event_loop, instance.clone())
.unwrap();
let queue_family = physical
.queue_families()
.find(|&q|
q.supports_graphics() && q.explicitly_supports_transfers()
&& surface.is_supported(q).unwrap_or(false)
)
.expect("Device does not have a queue family that can draw to the window and supports transfers.");
let (device, mut queues) = {
let device_ext = DeviceExtensions {
khr_swapchain: true,
// Needed for compute shaders.
khr_storage_buffer_storage_class: true,
..DeviceExtensions::none()
};
Device::new(
physical,
physical.supported_features(),
&device_ext,
[(queue_family, 0.5)].iter().cloned(),
)
.expect("Failed to create device")
};
let queue = queues.next().unwrap();
let format;
let (swapchain, images) = {
let caps = surface
.capabilities(physical)
.expect("Failed to get capabilities.");
format = caps.supported_formats[0].0;
let dimensions = caps.current_extent.unwrap_or([1280, 1024]);
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let image_usage = ImageUsage {
transfer_destination: true,
..ImageUsage::color_attachment()
};
Swapchain::new(
device.clone(),
surface.clone(),
caps.min_image_count,
format,
dimensions,
1,
image_usage,
&queue,
SurfaceTransform::Identity,
alpha,
PresentMode::Fifo,
FullscreenExclusive::Default,
true,
ColorSpace::SrgbNonLinear,
)
.expect("Failed to create swapchain")
};
let mut imgui = Context::create();
imgui.set_ini_filename(None);
let mut platform = WinitPlatform::init(&mut imgui);
platform.attach_window(imgui.io_mut(), &surface.window(), HiDpiMode::Rounded);
let renderer = Renderer::init(&mut imgui, device.clone(), queue.clone(), format)
.expect("Failed to initialize renderer");
System {
event_loop,
device,
queue,
surface,
swapchain,
images,
imgui,
platform,
renderer,
}
} | identifier_body |
system.rs | use crate::simulation::agent_shader::ty::PushConstantData;
use crate::simulation::blur_fade_shader;
use crate::simulation::Simulation;
use imgui::{Context, Ui};
use imgui_vulkano_renderer::Renderer;
use imgui_winit_support::{HiDpiMode, WinitPlatform};
use std::sync::Arc;
use std::time::{Duration, Instant};
use vulkano::command_buffer::AutoCommandBufferBuilder;
use vulkano::device::{Device, DeviceExtensions, Queue};
use vulkano::image::{ImageUsage, SwapchainImage};
use vulkano::instance::{Instance, PhysicalDevice};
use vulkano::swapchain;
use vulkano::swapchain::{
AcquireError, ColorSpace, FullscreenExclusive, PresentMode, Surface, SurfaceTransform,
Swapchain, SwapchainCreationError,
};
use vulkano::sync;
use vulkano::sync::{FlushError, GpuFuture};
use vulkano_win::VkSurfaceBuild;
use winit::event::{Event, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::{Window, WindowBuilder};
pub struct System {
pub event_loop: EventLoop<()>,
pub device: Arc<Device>,
pub queue: Arc<Queue>,
pub surface: Arc<Surface<Window>>,
pub swapchain: Arc<Swapchain<Window>>,
pub images: Vec<Arc<SwapchainImage<Window>>>,
pub imgui: Context,
pub platform: WinitPlatform,
pub renderer: Renderer,
}
impl System {
pub fn init(window_title: &str) -> System {
// Basic commands taken from the vulkano imgui examples:
// https://github.com/Tenebryo/imgui-vulkano-renderer/blob/master/examples/support/mod.rs
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None).expect("Failed to create instance.")
};
let physical = PhysicalDevice::enumerate(&instance)
.next()
.expect("No device available");
let event_loop = EventLoop::new();
let surface = WindowBuilder::new()
.with_title(window_title.to_owned())
.with_inner_size(winit::dpi::PhysicalSize {
width: 2000,
height: 1400,
})
.build_vk_surface(&event_loop, instance.clone())
.unwrap();
let queue_family = physical
.queue_families()
.find(|&q|
q.supports_graphics() && q.explicitly_supports_transfers()
&& surface.is_supported(q).unwrap_or(false)
)
.expect("Device does not have a queue family that can draw to the window and supports transfers.");
let (device, mut queues) = {
let device_ext = DeviceExtensions {
khr_swapchain: true,
// Needed for compute shaders.
khr_storage_buffer_storage_class: true,
..DeviceExtensions::none()
};
Device::new(
physical,
physical.supported_features(),
&device_ext,
[(queue_family, 0.5)].iter().cloned(),
)
.expect("Failed to create device")
};
let queue = queues.next().unwrap();
let format;
let (swapchain, images) = {
let caps = surface
.capabilities(physical)
.expect("Failed to get capabilities.");
format = caps.supported_formats[0].0;
let dimensions = caps.current_extent.unwrap_or([1280, 1024]);
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let image_usage = ImageUsage {
transfer_destination: true,
..ImageUsage::color_attachment()
};
Swapchain::new(
device.clone(),
surface.clone(),
caps.min_image_count,
format,
dimensions,
1,
image_usage,
&queue,
SurfaceTransform::Identity,
alpha,
PresentMode::Fifo,
FullscreenExclusive::Default,
true,
ColorSpace::SrgbNonLinear,
)
.expect("Failed to create swapchain")
};
let mut imgui = Context::create();
imgui.set_ini_filename(None);
let mut platform = WinitPlatform::init(&mut imgui);
platform.attach_window(imgui.io_mut(), &surface.window(), HiDpiMode::Rounded);
let renderer = Renderer::init(&mut imgui, device.clone(), queue.clone(), format)
.expect("Failed to initialize renderer");
System {
event_loop,
device,
queue,
surface,
swapchain,
images,
imgui,
platform,
renderer,
}
}
pub fn | <
F: FnMut(
&mut bool,
&mut PushConstantData,
&mut blur_fade_shader::ty::PushConstantData,
&mut Ui,
) + 'static,
>(
self,
simulation: Simulation,
mut run_ui: F,
) {
let System {
event_loop,
device,
queue,
surface,
mut swapchain,
mut images,
mut imgui,
mut platform,
mut renderer,
..
} = self;
// Apparently there are various reasons why we might need to re-create the swapchain.
// For example when the target surface has changed size.
// This keeps track of whether the previous frame encountered one of those reasons.
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let mut last_redraw = Instant::now();
let mut sim_parameters: PushConstantData = PushConstantData {
// Pixels per second.
agent_speed: 100.0,
// Radians per second.
agent_turn_speed: 50.0,
sensor_radius: 1,
// In the range [0 - PI]
sensor_angle_spacing: 0.18,
// Seconds per frame. (60fps)
delta_time: 0.016667,
};
let mut fade_parameters: blur_fade_shader::ty::PushConstantData =
blur_fade_shader::ty::PushConstantData {
// Seconds per frame. (60fps)
delta_time: 0.016667,
evaporate_speed: 0.9,
};
// target 60 fps
let target_frame_time = Duration::from_millis(1000 / 60);
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::MainEventsCleared => {
platform
.prepare_frame(imgui.io_mut(), &surface.window())
.expect("Failed to prepare frame.");
surface.window().request_redraw();
}
Event::RedrawRequested(_) => {
// ---- Stick to the framerate ----
let t = Instant::now();
let since_last = t.duration_since(last_redraw);
last_redraw = t;
if since_last < target_frame_time {
std::thread::sleep(target_frame_time - since_last);
}
// ---- Cleanup ----
previous_frame_end.as_mut().unwrap().cleanup_finished();
// ---- Recreate swapchain if necessary ----
if recreate_swapchain {
let dimensions: [u32; 2] = surface.window().inner_size().into();
let (new_swapchain, new_images) =
match swapchain.recreate_with_dimensions(dimensions) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => return,
Err(e) => panic!("Failed to recreate swapchain: {:?}", e),
};
images = new_images;
swapchain = new_swapchain;
recreate_swapchain = false;
}
// ---- Run the user's imgui code ----
let mut ui = imgui.frame();
let mut run = true;
run_ui(&mut run, &mut sim_parameters, &mut fade_parameters, &mut ui);
if !run {
*control_flow = ControlFlow::Exit;
}
// ---- Create draw commands ----
let (image_num, suboptimal, acquire_future) =
match swapchain::acquire_next_image(swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {:?}", e),
};
if suboptimal {
recreate_swapchain = true;
}
platform.prepare_render(&ui, surface.window());
let draw_data = ui.render();
let extent_x = simulation
.result_image
.dimensions()
.width()
.min(images[image_num].dimensions()[0]);
let extent_y = simulation
.result_image
.dimensions()
.height()
.min(images[image_num].dimensions()[1]);
let mut cmd_buf_builder =
AutoCommandBufferBuilder::new(device.clone(), queue.family())
.expect("Failed to create command buffer");
cmd_buf_builder
.clear_color_image(images[image_num].clone(), [0.0; 4].into())
.unwrap();
cmd_buf_builder
.copy_image(
simulation.result_image.clone(),
[0; 3],
0,
0,
images[image_num].clone(),
[0; 3],
0,
0,
[extent_x, extent_y, 1],
1,
)
.expect("Failed to create image copy command");
renderer
.draw_commands(
&mut cmd_buf_builder,
queue.clone(),
images[image_num].clone(),
draw_data,
)
.expect("Rendering failed");
let cmd_buf = cmd_buf_builder
.build()
.expect("Failed to build command buffer");
// ---- Execute the draw commands ----
let (buffer_1, buffer_2, buffer_3) =
simulation.create_command_buffers(&sim_parameters, &fade_parameters);
let future = previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(queue.clone(), buffer_1)
.unwrap()
.then_execute(queue.clone(), buffer_2)
.unwrap()
.then_execute(queue.clone(), buffer_3)
.unwrap()
.then_execute(queue.clone(), cmd_buf)
.unwrap()
.then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Some(future.boxed());
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {:?}", e);
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
event => {
// Pass events on to imgui.
platform.handle_event(imgui.io_mut(), surface.window(), &event);
}
}
});
}
}
| main_loop | identifier_name |
dl_ocr_API.py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 15:24:56 2018
@author: kboosam
"""
'''
@@ API TO CAPTURE THE DRIVING LICENSE DETAILS FROM GOOGLE VISION API
'''
# Importing libraries
#import pandas as pd
from flask import Flask, jsonify, request
import logging
from flask_cors import CORS
#import numpy as np
from raven.contrib.flask import Sentry ## Sentry logging
#import requests
import json
import http.client
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
import io
import re, os
from random import randint
import urllib.request as req
##
## FUNCTION TO CALL GGOGLE VISION API WITH THE DL IMAGE
##
def DL_OCR_VISION(path):
try:
## First download the file for Google Vision API call
img_loc = "DL_tmp_"+str(randint(100001, 199999))+".jpg"
req.urlretrieve(path, img_loc)
print('---> Image file downloaded at:', img_loc)
client = vision.ImageAnnotatorClient()
''' for remote image - it didn't work as google rejected accessing FB images
image = types.Image()
image.source.image_uri = path
'''
# THIS IS FOR LOCAL FILE
with io.open(img_loc, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
print('-------> Calling google vision API conplete')
## Delete the downloaded image file
os.remove(img_loc)
ret_text = ''
#if response.error==:
#print('Texts:', texts)
for text in texts:
ret_text += text.description
#print(text , type(text))
#ret_text.replace('\n',' ') # replace new line charachters
ret_text = ' '.join(ret_text.split())
except Exception as e:
print(e)
print('Error occured while calling the google vision API - 105')
return ret_text ## retunrs a string of all text from the driving license
####
### FUNCTION TO PARSE THE TEXTS returned from Vision API to a DL object
####
'''
DL OBject structure
{
DLN : <drivers license number>,
DLN_valid: <False / True>
DOB : <bate of birth as str>,
EXP_DT : <exp date as str>,
address: {
add_ln1: <line 1>,
add_ln2: <line 2>,
city: <city>,
state: <state code>,
zip: <zip 5 or 5+4>
},
verified: <False/True> "valid address or not"
}
'''
def parse_DL(full_text):
print('full text - ', full_text)
## Remove non-ascii characters that are inserted by google vision sometimes.
all_ascii = ''.join(char for char in full_text if ord(char) < 128)
if full_text != all_ascii :
print('### ---- ### Non-ascii charachters removed from text', all_ascii)
full_text = all_ascii
state = ' ' ## Initialize
if full_text.count('Texas') or full_text.count('TX') > 0 : state = 'TX'
if full_text.count('Sunshine') > 0 and full_text.count('FL') : state='FL'
if full_text.count('Jes') > 0 and full_text.count('White') : state = 'IL'
if full_text.count('visitPA') > 0 : state='PA'
if full_text.count('WISCON') > 0 : state='WI'
if full_text.count('CALIF') > 0 : state='CA'
if full_text.count('ALABAMA') > 0 : state='AL'
if state in ['TX', 'PA', 'IL', 'WI']:
full_text = full_text.replace(' 1 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 2 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 8 ',' ') # replace FIELD LABELS
if state=='TX' :
full_text = full_text.replace(' 3 ',' ')
full_text = full_text.replace(' 4b ',' ') # replace FIELD LABELS
full_text = full_text.replace('\n',' ')
else:
full_text = full_text.replace('\n',' ')
#### Call Smarty Streets API to find address from text
try:
conn = http.client.HTTPSConnection("us-extract.api.smartystreets.com")
payload = full_text #send full text
headers = {
'content-type': "text/plain",
'host': "us-extract.api.smartystreets.com",
'cache-control': "no-cache"
}
conn.request("POST", "/?auth-id=eff0b523-c528-0292-6685-6ad2c5a6e92a&auth-token=V7pWleHG8yLUS8CC7NqQ", payload, headers)
SSresp = conn.getresponse()
print('---->Call to SmartyStreets successful: ', SSresp)
except Exception as e:
print('###@@@@### Error occured while calling the SmartyStreets API for address extraction')
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
try:
SSresp = json.loads(SSresp.read())
print ('\n\n ---> Response from SmartyStreets', SSresp)
verified = SSresp['addresses'][0]['verified'] # address validity
if not verified : ## Checking if the address is valid
postal_address = {
"add_ln1":SSresp['addresses'][0]['text']
}
# when address is not valid we are just sending the identified address string in the line 1
print('Address on DL is invalid:', SSresp['addresses'][0]['text'] )
else:
#extract the address object
address = SSresp['addresses'][0]['api_output'][0]
## fomulate address
postal_address = {
"add_ln1": address['delivery_line_1'],
"add_ln2": '',
"city": address['components']['city_name'],
"state": address['components']['state_abbreviation'],
"zip": address['components']['zipcode'] + '-' + address['components']['plus4_code']
}
state = address['components']['state_abbreviation'] # get state code for all other work.
### END OF IF ELSE STRUCTURE
except Exception as e:
print(e)
print('###@@@@### Error occured while building address from SmartyStreets API response')
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
## make a continuous string without spaces by concatenating all individual texts from google
full_str = ''.join(full_text.split())
print('---->Fetching DLN; Address state is:', state)
# get DL number for IL
if state == 'IL':
# IL DLN is 14 digits - X999-999-999
DLN = re.search('\D\d{3}-\d{4}-\d{4}', full_str).group(0)
# get DL number for TX
if state == 'TX':
DLN = re.search('\d{6}9', full_str).group(0)
# get DL number for FL
if state == 'FL':
DLN = re.search('\D\d{3}-\d{3}-\d{2}-\d{3}-\d', full_str).group(0) # FL DLN is 17 digits
# get DL number for PA
if state == 'PA':
DLN = re.search('DLN\:\d{8}', full_str).group(0)[4:] # PA DLN is 8 digits
# get DL number for WI
if state == 'WI':
DLN = re.search('\D\d{3}-\d{4}-\d{4}-\d{2}', full_str).group(0) # WI DLN is 14 digits
# get DL number for CA
if state == 'CA':
DLN = re.search('\D\d{7}', full_str).group(0) # WI DLN is 8 digits
# get DL number for AL
if state == 'AL':
DLN = re.search('NO\.\d{7}', full_str).group(0)[3:] # WI DLN is 7 digits
print('----> License Number: ', DLN)
#### GET DOB and EXPIRY DATE
dtformat = True
DATES = re.findall('(\\d{1,2}/\\d{1,2}/\\d{4})', full_str) #date separator by slashes
if len(DATES) == 0:
dtformat = False
DATES = re.findall('(\d{1,2}-\d{1,2}-\d{4})', full_str) # date separator as -
if len(DATES) == 0: raise Exception('dates not found on drivers license')
#remove duplicates from the dates. there are duplicates because full_text for some reason contain two copies
imp_DATES = []
for t_date in DATES:
if t_date not in imp_DATES:
imp_DATES.append(t_date)
###
### TO CAPTURE Date of Birth and expiry date of the Driving license, SORT dates in scending order
### smallet date would be DOB and farthest date would be expiry date
###
import datetime
DLN_valid = True
if dtformat : | DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## check if DL is still valid
else:
imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m-%d-%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m-%d-%Y")
DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## Check if DL is not valid
DOB = imp_DATES[0] ## oldest date will be DOB
EXP = imp_DATES[-1] ## Latest date will be Expiry date of DL
print('----> DOB, EXPIRY: ', DOB, EXP)
ret_obj = {
"DLN": DLN,
"DLN_valid": DLN_valid,
"DL_State": state,
"DOB": DOB,
"EXP_DT": EXP,
"address": postal_address,
"verified":verified
}
# end of else - Verified address
return ret_obj
###
#### function to build the response for CHATFUEL JSON API
###
def build_resp(dlobj):
try:
# build the Full response dictionary
if dlobj['DLN_valid'] :
if dlobj['verified']:### build success message, display details and show quick reply buttons
print("Good driving license \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "YES",
"jsonAPIError": "NO",
"DLN" : dlobj['DLN'],
"DL_DOB" : dlobj['DOB'],
"DL_EXP":dlobj['EXP_DT'],
"DL_add_ln1": dlobj['address']['add_ln1'],
"DL_add_ln2": dlobj['address']['add_ln2'],
"DL_city": dlobj['address']['city'],
"DL_state": dlobj['address']['state'],
"DL_zip": dlobj['address']['zip']
},
"messages": [
{
"text": "We have scanned the drivers license you provided. Please confirm the below details"
},
{
"text": "DL Number: " + dlobj['DLN']
},
{
"text": "Date of Birth: " + dlobj['DOB']
},
{
"text": "DL Validity: " + dlobj['EXP_DT']
},
{
"text": "Address: " + dlobj['address']['add_ln1'] + ',\n' + dlobj['address']['add_ln2'] + ',\n' + dlobj['address']['city'] + ', ' + dlobj['address']['state'] + ' ' + dlobj['address']['zip']
}
]
}
else:
### Address could not be verified...
print("DL Address is not confirmed as valid \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "NO",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We could not validate the address. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
else:
### DL Expired
print("Driving license has expired!!! \n")
resp_dict = {
"set_attributes": {
"validDL":"NO",
"validAddress" : "NO" if not dlobj['verified'] else "YES",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We observed an issue with the document provided. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
except Exception as e:
print(e)
print('###@@@@### Error occured by building response dictionary object')
sentry.captureMessage(message=e, level=logging.FATAL)
resp_dict = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 104."}
]
}
return resp_dict;
##### END OF FUNCTION - build_resp
###################################################################
app = Flask(__name__)
#set sentry for logging the messages
sentry = Sentry(app, dsn='https://e8ddaf32cc924aa295b846f4947a9332:5e52d48fe13a4d2c82babe6833c5f871@sentry.io/273115')
CORS(app) ## cross origin resource whitelisting..
## dl ocr api on flask
@app.route('/dlocr_api', methods=['POST','GET'])
def get_DL():
"""API Call
Pandas dataframe (sent as a payload) from API Call
"""
#print("\n\n Started processing the GET request..\n")
##################
# REQUEST STRCUTRE
# imgurl
#################
try:
#req = request.json
img_path = request.args.get('imgurl', type= str)
print("##This is the request:", request.args , '\n\n')
#print("##This is the request JSON:", str(request.get_json()), '\n\n')
sentry.captureMessage(message='Started processing request- {}'.format(img_path), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL)
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the DL image details for your vehicle - 102."},
]
}
try:
#img_path = "DL Tests\illinois-DL.jpg"
# call google vision API
DL_Text = DL_OCR_VISION(img_path)
#parse to DL objects
dlobj = parse_DL(DL_Text)
print ('Parsed DL Info:', dlobj)
#build response structure
resp = build_resp(dlobj)
#resp = dlobj
#sentry.captureMessage(message='completed processing the DL OCR: {}'.format(dlobj['DLN']), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 103."},
]
}
print ("--- Response -->", resp)
return jsonify(resp)
#### END OF function
# main function
if __name__ == '__main__':
## DISABLE CERITIFACATE VERIFICATION FOR SSL.. some issue in Capgemini network..
'''
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
'''
sentry.captureMessage('Started runnning API for DL COR !!')
#app.run(debug= True)
app.run(debug=True,port=5100) #turnoff debug for production deployment | imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m/%d/%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m/%d/%Y") | random_line_split |
dl_ocr_API.py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 15:24:56 2018
@author: kboosam
"""
'''
@@ API TO CAPTURE THE DRIVING LICENSE DETAILS FROM GOOGLE VISION API
'''
# Importing libraries
#import pandas as pd
from flask import Flask, jsonify, request
import logging
from flask_cors import CORS
#import numpy as np
from raven.contrib.flask import Sentry ## Sentry logging
#import requests
import json
import http.client
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
import io
import re, os
from random import randint
import urllib.request as req
##
## FUNCTION TO CALL GGOGLE VISION API WITH THE DL IMAGE
##
def DL_OCR_VISION(path):
try:
## First download the file for Google Vision API call
img_loc = "DL_tmp_"+str(randint(100001, 199999))+".jpg"
req.urlretrieve(path, img_loc)
print('---> Image file downloaded at:', img_loc)
client = vision.ImageAnnotatorClient()
''' for remote image - it didn't work as google rejected accessing FB images
image = types.Image()
image.source.image_uri = path
'''
# THIS IS FOR LOCAL FILE
with io.open(img_loc, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
print('-------> Calling google vision API conplete')
## Delete the downloaded image file
os.remove(img_loc)
ret_text = ''
#if response.error==:
#print('Texts:', texts)
for text in texts:
ret_text += text.description
#print(text , type(text))
#ret_text.replace('\n',' ') # replace new line charachters
ret_text = ' '.join(ret_text.split())
except Exception as e:
print(e)
print('Error occured while calling the google vision API - 105')
return ret_text ## retunrs a string of all text from the driving license
####
### FUNCTION TO PARSE THE TEXTS returned from Vision API to a DL object
####
'''
DL OBject structure
{
DLN : <drivers license number>,
DLN_valid: <False / True>
DOB : <bate of birth as str>,
EXP_DT : <exp date as str>,
address: {
add_ln1: <line 1>,
add_ln2: <line 2>,
city: <city>,
state: <state code>,
zip: <zip 5 or 5+4>
},
verified: <False/True> "valid address or not"
}
'''
def parse_DL(full_text):
print('full text - ', full_text)
## Remove non-ascii characters that are inserted by google vision sometimes.
all_ascii = ''.join(char for char in full_text if ord(char) < 128)
if full_text != all_ascii :
print('### ---- ### Non-ascii charachters removed from text', all_ascii)
full_text = all_ascii
state = ' ' ## Initialize
if full_text.count('Texas') or full_text.count('TX') > 0 : state = 'TX'
if full_text.count('Sunshine') > 0 and full_text.count('FL') : state='FL'
if full_text.count('Jes') > 0 and full_text.count('White') : state = 'IL'
if full_text.count('visitPA') > 0 : state='PA'
if full_text.count('WISCON') > 0 : state='WI'
if full_text.count('CALIF') > 0 : state='CA'
if full_text.count('ALABAMA') > 0 : state='AL'
if state in ['TX', 'PA', 'IL', 'WI']:
full_text = full_text.replace(' 1 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 2 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 8 ',' ') # replace FIELD LABELS
if state=='TX' :
full_text = full_text.replace(' 3 ',' ')
full_text = full_text.replace(' 4b ',' ') # replace FIELD LABELS
full_text = full_text.replace('\n',' ')
else:
full_text = full_text.replace('\n',' ')
#### Call Smarty Streets API to find address from text
try:
conn = http.client.HTTPSConnection("us-extract.api.smartystreets.com")
payload = full_text #send full text
headers = {
'content-type': "text/plain",
'host': "us-extract.api.smartystreets.com",
'cache-control': "no-cache"
}
conn.request("POST", "/?auth-id=eff0b523-c528-0292-6685-6ad2c5a6e92a&auth-token=V7pWleHG8yLUS8CC7NqQ", payload, headers)
SSresp = conn.getresponse()
print('---->Call to SmartyStreets successful: ', SSresp)
except Exception as e:
print('###@@@@### Error occured while calling the SmartyStreets API for address extraction')
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
try:
SSresp = json.loads(SSresp.read())
print ('\n\n ---> Response from SmartyStreets', SSresp)
verified = SSresp['addresses'][0]['verified'] # address validity
if not verified : ## Checking if the address is valid
postal_address = {
"add_ln1":SSresp['addresses'][0]['text']
}
# when address is not valid we are just sending the identified address string in the line 1
print('Address on DL is invalid:', SSresp['addresses'][0]['text'] )
else:
#extract the address object
address = SSresp['addresses'][0]['api_output'][0]
## fomulate address
postal_address = {
"add_ln1": address['delivery_line_1'],
"add_ln2": '',
"city": address['components']['city_name'],
"state": address['components']['state_abbreviation'],
"zip": address['components']['zipcode'] + '-' + address['components']['plus4_code']
}
state = address['components']['state_abbreviation'] # get state code for all other work.
### END OF IF ELSE STRUCTURE
except Exception as e:
print(e)
print('###@@@@### Error occured while building address from SmartyStreets API response')
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
## make a continuous string without spaces by concatenating all individual texts from google
full_str = ''.join(full_text.split())
print('---->Fetching DLN; Address state is:', state)
# get DL number for IL
if state == 'IL':
# IL DLN is 14 digits - X999-999-999
DLN = re.search('\D\d{3}-\d{4}-\d{4}', full_str).group(0)
# get DL number for TX
if state == 'TX':
DLN = re.search('\d{6}9', full_str).group(0)
# get DL number for FL
if state == 'FL':
DLN = re.search('\D\d{3}-\d{3}-\d{2}-\d{3}-\d', full_str).group(0) # FL DLN is 17 digits
# get DL number for PA
if state == 'PA':
DLN = re.search('DLN\:\d{8}', full_str).group(0)[4:] # PA DLN is 8 digits
# get DL number for WI
if state == 'WI':
DLN = re.search('\D\d{3}-\d{4}-\d{4}-\d{2}', full_str).group(0) # WI DLN is 14 digits
# get DL number for CA
if state == 'CA':
DLN = re.search('\D\d{7}', full_str).group(0) # WI DLN is 8 digits
# get DL number for AL
if state == 'AL':
DLN = re.search('NO\.\d{7}', full_str).group(0)[3:] # WI DLN is 7 digits
print('----> License Number: ', DLN)
#### GET DOB and EXPIRY DATE
dtformat = True
DATES = re.findall('(\\d{1,2}/\\d{1,2}/\\d{4})', full_str) #date separator by slashes
if len(DATES) == 0:
dtformat = False
DATES = re.findall('(\d{1,2}-\d{1,2}-\d{4})', full_str) # date separator as -
if len(DATES) == 0: raise Exception('dates not found on drivers license')
#remove duplicates from the dates. there are duplicates because full_text for some reason contain two copies
imp_DATES = []
for t_date in DATES:
if t_date not in imp_DATES:
imp_DATES.append(t_date)
###
### TO CAPTURE Date of Birth and expiry date of the Driving license, SORT dates in scending order
### smallet date would be DOB and farthest date would be expiry date
###
import datetime
DLN_valid = True
if dtformat :
imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m/%d/%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m/%d/%Y")
DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## check if DL is still valid
else:
imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m-%d-%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m-%d-%Y")
DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## Check if DL is not valid
DOB = imp_DATES[0] ## oldest date will be DOB
EXP = imp_DATES[-1] ## Latest date will be Expiry date of DL
print('----> DOB, EXPIRY: ', DOB, EXP)
ret_obj = {
"DLN": DLN,
"DLN_valid": DLN_valid,
"DL_State": state,
"DOB": DOB,
"EXP_DT": EXP,
"address": postal_address,
"verified":verified
}
# end of else - Verified address
return ret_obj
###
#### function to build the response for CHATFUEL JSON API
###
def build_resp(dlobj):
try:
# build the Full response dictionary
if dlobj['DLN_valid'] :
if dlobj['verified']:### build success message, display details and show quick reply buttons
print("Good driving license \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "YES",
"jsonAPIError": "NO",
"DLN" : dlobj['DLN'],
"DL_DOB" : dlobj['DOB'],
"DL_EXP":dlobj['EXP_DT'],
"DL_add_ln1": dlobj['address']['add_ln1'],
"DL_add_ln2": dlobj['address']['add_ln2'],
"DL_city": dlobj['address']['city'],
"DL_state": dlobj['address']['state'],
"DL_zip": dlobj['address']['zip']
},
"messages": [
{
"text": "We have scanned the drivers license you provided. Please confirm the below details"
},
{
"text": "DL Number: " + dlobj['DLN']
},
{
"text": "Date of Birth: " + dlobj['DOB']
},
{
"text": "DL Validity: " + dlobj['EXP_DT']
},
{
"text": "Address: " + dlobj['address']['add_ln1'] + ',\n' + dlobj['address']['add_ln2'] + ',\n' + dlobj['address']['city'] + ', ' + dlobj['address']['state'] + ' ' + dlobj['address']['zip']
}
]
}
else:
### Address could not be verified...
print("DL Address is not confirmed as valid \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "NO",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We could not validate the address. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
else:
### DL Expired
print("Driving license has expired!!! \n")
resp_dict = {
"set_attributes": {
"validDL":"NO",
"validAddress" : "NO" if not dlobj['verified'] else "YES",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We observed an issue with the document provided. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
except Exception as e:
print(e)
print('###@@@@### Error occured by building response dictionary object')
sentry.captureMessage(message=e, level=logging.FATAL)
resp_dict = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 104."}
]
}
return resp_dict;
##### END OF FUNCTION - build_resp
###################################################################
app = Flask(__name__)
#set sentry for logging the messages
sentry = Sentry(app, dsn='https://e8ddaf32cc924aa295b846f4947a9332:5e52d48fe13a4d2c82babe6833c5f871@sentry.io/273115')
CORS(app) ## cross origin resource whitelisting..
## dl ocr api on flask
@app.route('/dlocr_api', methods=['POST','GET'])
def get_DL():
|
#### END OF function
# main function
if __name__ == '__main__':
## DISABLE CERITIFACATE VERIFICATION FOR SSL.. some issue in Capgemini network..
'''
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
'''
sentry.captureMessage('Started runnning API for DL COR !!')
#app.run(debug= True)
app.run(debug=True,port=5100) #turnoff debug for production deployment
| """API Call
Pandas dataframe (sent as a payload) from API Call
"""
#print("\n\n Started processing the GET request..\n")
##################
# REQUEST STRCUTRE
# imgurl
#################
try:
#req = request.json
img_path = request.args.get('imgurl', type= str)
print("##This is the request:", request.args , '\n\n')
#print("##This is the request JSON:", str(request.get_json()), '\n\n')
sentry.captureMessage(message='Started processing request- {}'.format(img_path), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL)
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the DL image details for your vehicle - 102."},
]
}
try:
#img_path = "DL Tests\illinois-DL.jpg"
# call google vision API
DL_Text = DL_OCR_VISION(img_path)
#parse to DL objects
dlobj = parse_DL(DL_Text)
print ('Parsed DL Info:', dlobj)
#build response structure
resp = build_resp(dlobj)
#resp = dlobj
#sentry.captureMessage(message='completed processing the DL OCR: {}'.format(dlobj['DLN']), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 103."},
]
}
print ("--- Response -->", resp)
return jsonify(resp) | identifier_body |
dl_ocr_API.py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 15:24:56 2018
@author: kboosam
"""
'''
@@ API TO CAPTURE THE DRIVING LICENSE DETAILS FROM GOOGLE VISION API
'''
# Importing libraries
#import pandas as pd
from flask import Flask, jsonify, request
import logging
from flask_cors import CORS
#import numpy as np
from raven.contrib.flask import Sentry ## Sentry logging
#import requests
import json
import http.client
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
import io
import re, os
from random import randint
import urllib.request as req
##
## FUNCTION TO CALL GGOGLE VISION API WITH THE DL IMAGE
##
def DL_OCR_VISION(path):
try:
## First download the file for Google Vision API call
img_loc = "DL_tmp_"+str(randint(100001, 199999))+".jpg"
req.urlretrieve(path, img_loc)
print('---> Image file downloaded at:', img_loc)
client = vision.ImageAnnotatorClient()
''' for remote image - it didn't work as google rejected accessing FB images
image = types.Image()
image.source.image_uri = path
'''
# THIS IS FOR LOCAL FILE
with io.open(img_loc, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
print('-------> Calling google vision API conplete')
## Delete the downloaded image file
os.remove(img_loc)
ret_text = ''
#if response.error==:
#print('Texts:', texts)
for text in texts:
ret_text += text.description
#print(text , type(text))
#ret_text.replace('\n',' ') # replace new line charachters
ret_text = ' '.join(ret_text.split())
except Exception as e:
print(e)
print('Error occured while calling the google vision API - 105')
return ret_text ## retunrs a string of all text from the driving license
####
### FUNCTION TO PARSE THE TEXTS returned from Vision API to a DL object
####
'''
DL OBject structure
{
DLN : <drivers license number>,
DLN_valid: <False / True>
DOB : <bate of birth as str>,
EXP_DT : <exp date as str>,
address: {
add_ln1: <line 1>,
add_ln2: <line 2>,
city: <city>,
state: <state code>,
zip: <zip 5 or 5+4>
},
verified: <False/True> "valid address or not"
}
'''
def parse_DL(full_text):
print('full text - ', full_text)
## Remove non-ascii characters that are inserted by google vision sometimes.
all_ascii = ''.join(char for char in full_text if ord(char) < 128)
if full_text != all_ascii :
print('### ---- ### Non-ascii charachters removed from text', all_ascii)
full_text = all_ascii
state = ' ' ## Initialize
if full_text.count('Texas') or full_text.count('TX') > 0 : state = 'TX'
if full_text.count('Sunshine') > 0 and full_text.count('FL') : state='FL'
if full_text.count('Jes') > 0 and full_text.count('White') : state = 'IL'
if full_text.count('visitPA') > 0 : state='PA'
if full_text.count('WISCON') > 0 : state='WI'
if full_text.count('CALIF') > 0 : state='CA'
if full_text.count('ALABAMA') > 0 : state='AL'
if state in ['TX', 'PA', 'IL', 'WI']:
full_text = full_text.replace(' 1 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 2 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 8 ',' ') # replace FIELD LABELS
if state=='TX' :
full_text = full_text.replace(' 3 ',' ')
full_text = full_text.replace(' 4b ',' ') # replace FIELD LABELS
full_text = full_text.replace('\n',' ')
else:
full_text = full_text.replace('\n',' ')
#### Call Smarty Streets API to find address from text
try:
conn = http.client.HTTPSConnection("us-extract.api.smartystreets.com")
payload = full_text #send full text
headers = {
'content-type': "text/plain",
'host': "us-extract.api.smartystreets.com",
'cache-control': "no-cache"
}
conn.request("POST", "/?auth-id=eff0b523-c528-0292-6685-6ad2c5a6e92a&auth-token=V7pWleHG8yLUS8CC7NqQ", payload, headers)
SSresp = conn.getresponse()
print('---->Call to SmartyStreets successful: ', SSresp)
except Exception as e:
print('###@@@@### Error occured while calling the SmartyStreets API for address extraction')
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
try:
SSresp = json.loads(SSresp.read())
print ('\n\n ---> Response from SmartyStreets', SSresp)
verified = SSresp['addresses'][0]['verified'] # address validity
if not verified : ## Checking if the address is valid
postal_address = {
"add_ln1":SSresp['addresses'][0]['text']
}
# when address is not valid we are just sending the identified address string in the line 1
print('Address on DL is invalid:', SSresp['addresses'][0]['text'] )
else:
#extract the address object
address = SSresp['addresses'][0]['api_output'][0]
## fomulate address
postal_address = {
"add_ln1": address['delivery_line_1'],
"add_ln2": '',
"city": address['components']['city_name'],
"state": address['components']['state_abbreviation'],
"zip": address['components']['zipcode'] + '-' + address['components']['plus4_code']
}
state = address['components']['state_abbreviation'] # get state code for all other work.
### END OF IF ELSE STRUCTURE
except Exception as e:
print(e)
print('###@@@@### Error occured while building address from SmartyStreets API response')
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
## make a continuous string without spaces by concatenating all individual texts from google
full_str = ''.join(full_text.split())
print('---->Fetching DLN; Address state is:', state)
# get DL number for IL
if state == 'IL':
# IL DLN is 14 digits - X999-999-999
DLN = re.search('\D\d{3}-\d{4}-\d{4}', full_str).group(0)
# get DL number for TX
if state == 'TX':
DLN = re.search('\d{6}9', full_str).group(0)
# get DL number for FL
if state == 'FL':
DLN = re.search('\D\d{3}-\d{3}-\d{2}-\d{3}-\d', full_str).group(0) # FL DLN is 17 digits
# get DL number for PA
if state == 'PA':
DLN = re.search('DLN\:\d{8}', full_str).group(0)[4:] # PA DLN is 8 digits
# get DL number for WI
if state == 'WI':
DLN = re.search('\D\d{3}-\d{4}-\d{4}-\d{2}', full_str).group(0) # WI DLN is 14 digits
# get DL number for CA
if state == 'CA':
DLN = re.search('\D\d{7}', full_str).group(0) # WI DLN is 8 digits
# get DL number for AL
if state == 'AL':
DLN = re.search('NO\.\d{7}', full_str).group(0)[3:] # WI DLN is 7 digits
print('----> License Number: ', DLN)
#### GET DOB and EXPIRY DATE
dtformat = True
DATES = re.findall('(\\d{1,2}/\\d{1,2}/\\d{4})', full_str) #date separator by slashes
if len(DATES) == 0:
dtformat = False
DATES = re.findall('(\d{1,2}-\d{1,2}-\d{4})', full_str) # date separator as -
if len(DATES) == 0: raise Exception('dates not found on drivers license')
#remove duplicates from the dates. there are duplicates because full_text for some reason contain two copies
imp_DATES = []
for t_date in DATES:
if t_date not in imp_DATES:
imp_DATES.append(t_date)
###
### TO CAPTURE Date of Birth and expiry date of the Driving license, SORT dates in scending order
### smallet date would be DOB and farthest date would be expiry date
###
import datetime
DLN_valid = True
if dtformat :
imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m/%d/%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m/%d/%Y")
DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## check if DL is still valid
else:
imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m-%d-%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m-%d-%Y")
DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## Check if DL is not valid
DOB = imp_DATES[0] ## oldest date will be DOB
EXP = imp_DATES[-1] ## Latest date will be Expiry date of DL
print('----> DOB, EXPIRY: ', DOB, EXP)
ret_obj = {
"DLN": DLN,
"DLN_valid": DLN_valid,
"DL_State": state,
"DOB": DOB,
"EXP_DT": EXP,
"address": postal_address,
"verified":verified
}
# end of else - Verified address
return ret_obj
###
#### function to build the response for CHATFUEL JSON API
###
def build_resp(dlobj):
try:
# build the Full response dictionary
if dlobj['DLN_valid'] :
if dlobj['verified']:### build success message, display details and show quick reply buttons
print("Good driving license \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "YES",
"jsonAPIError": "NO",
"DLN" : dlobj['DLN'],
"DL_DOB" : dlobj['DOB'],
"DL_EXP":dlobj['EXP_DT'],
"DL_add_ln1": dlobj['address']['add_ln1'],
"DL_add_ln2": dlobj['address']['add_ln2'],
"DL_city": dlobj['address']['city'],
"DL_state": dlobj['address']['state'],
"DL_zip": dlobj['address']['zip']
},
"messages": [
{
"text": "We have scanned the drivers license you provided. Please confirm the below details"
},
{
"text": "DL Number: " + dlobj['DLN']
},
{
"text": "Date of Birth: " + dlobj['DOB']
},
{
"text": "DL Validity: " + dlobj['EXP_DT']
},
{
"text": "Address: " + dlobj['address']['add_ln1'] + ',\n' + dlobj['address']['add_ln2'] + ',\n' + dlobj['address']['city'] + ', ' + dlobj['address']['state'] + ' ' + dlobj['address']['zip']
}
]
}
else:
### Address could not be verified...
print("DL Address is not confirmed as valid \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "NO",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We could not validate the address. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
else:
### DL Expired
print("Driving license has expired!!! \n")
resp_dict = {
"set_attributes": {
"validDL":"NO",
"validAddress" : "NO" if not dlobj['verified'] else "YES",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We observed an issue with the document provided. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
except Exception as e:
print(e)
print('###@@@@### Error occured by building response dictionary object')
sentry.captureMessage(message=e, level=logging.FATAL)
resp_dict = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 104."}
]
}
return resp_dict;
##### END OF FUNCTION - build_resp
###################################################################
app = Flask(__name__)
#set sentry for logging the messages
sentry = Sentry(app, dsn='https://e8ddaf32cc924aa295b846f4947a9332:5e52d48fe13a4d2c82babe6833c5f871@sentry.io/273115')
CORS(app) ## cross origin resource whitelisting..
## dl ocr api on flask
@app.route('/dlocr_api', methods=['POST','GET'])
def | ():
"""API Call
Pandas dataframe (sent as a payload) from API Call
"""
#print("\n\n Started processing the GET request..\n")
##################
# REQUEST STRCUTRE
# imgurl
#################
try:
#req = request.json
img_path = request.args.get('imgurl', type= str)
print("##This is the request:", request.args , '\n\n')
#print("##This is the request JSON:", str(request.get_json()), '\n\n')
sentry.captureMessage(message='Started processing request- {}'.format(img_path), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL)
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the DL image details for your vehicle - 102."},
]
}
try:
#img_path = "DL Tests\illinois-DL.jpg"
# call google vision API
DL_Text = DL_OCR_VISION(img_path)
#parse to DL objects
dlobj = parse_DL(DL_Text)
print ('Parsed DL Info:', dlobj)
#build response structure
resp = build_resp(dlobj)
#resp = dlobj
#sentry.captureMessage(message='completed processing the DL OCR: {}'.format(dlobj['DLN']), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 103."},
]
}
print ("--- Response -->", resp)
return jsonify(resp)
#### END OF function
# main function
if __name__ == '__main__':
## DISABLE CERITIFACATE VERIFICATION FOR SSL.. some issue in Capgemini network..
'''
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
'''
sentry.captureMessage('Started runnning API for DL COR !!')
#app.run(debug= True)
app.run(debug=True,port=5100) #turnoff debug for production deployment
| get_DL | identifier_name |
dl_ocr_API.py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 15:24:56 2018
@author: kboosam
"""
'''
@@ API TO CAPTURE THE DRIVING LICENSE DETAILS FROM GOOGLE VISION API
'''
# Importing libraries
#import pandas as pd
from flask import Flask, jsonify, request
import logging
from flask_cors import CORS
#import numpy as np
from raven.contrib.flask import Sentry ## Sentry logging
#import requests
import json
import http.client
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
import io
import re, os
from random import randint
import urllib.request as req
##
## FUNCTION TO CALL GGOGLE VISION API WITH THE DL IMAGE
##
def DL_OCR_VISION(path):
try:
## First download the file for Google Vision API call
img_loc = "DL_tmp_"+str(randint(100001, 199999))+".jpg"
req.urlretrieve(path, img_loc)
print('---> Image file downloaded at:', img_loc)
client = vision.ImageAnnotatorClient()
''' for remote image - it didn't work as google rejected accessing FB images
image = types.Image()
image.source.image_uri = path
'''
# THIS IS FOR LOCAL FILE
with io.open(img_loc, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
print('-------> Calling google vision API conplete')
## Delete the downloaded image file
os.remove(img_loc)
ret_text = ''
#if response.error==:
#print('Texts:', texts)
for text in texts:
ret_text += text.description
#print(text , type(text))
#ret_text.replace('\n',' ') # replace new line charachters
ret_text = ' '.join(ret_text.split())
except Exception as e:
print(e)
print('Error occured while calling the google vision API - 105')
return ret_text ## retunrs a string of all text from the driving license
####
### FUNCTION TO PARSE THE TEXTS returned from Vision API to a DL object
####
'''
DL OBject structure
{
DLN : <drivers license number>,
DLN_valid: <False / True>
DOB : <bate of birth as str>,
EXP_DT : <exp date as str>,
address: {
add_ln1: <line 1>,
add_ln2: <line 2>,
city: <city>,
state: <state code>,
zip: <zip 5 or 5+4>
},
verified: <False/True> "valid address or not"
}
'''
def parse_DL(full_text):
print('full text - ', full_text)
## Remove non-ascii characters that are inserted by google vision sometimes.
all_ascii = ''.join(char for char in full_text if ord(char) < 128)
if full_text != all_ascii :
print('### ---- ### Non-ascii charachters removed from text', all_ascii)
full_text = all_ascii
state = ' ' ## Initialize
if full_text.count('Texas') or full_text.count('TX') > 0 : state = 'TX'
if full_text.count('Sunshine') > 0 and full_text.count('FL') : state='FL'
if full_text.count('Jes') > 0 and full_text.count('White') : state = 'IL'
if full_text.count('visitPA') > 0 : state='PA'
if full_text.count('WISCON') > 0 : state='WI'
if full_text.count('CALIF') > 0 : state='CA'
if full_text.count('ALABAMA') > 0 : state='AL'
if state in ['TX', 'PA', 'IL', 'WI']:
full_text = full_text.replace(' 1 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 2 ',' ') # replace FIELD LABELS
full_text = full_text.replace(' 8 ',' ') # replace FIELD LABELS
if state=='TX' :
full_text = full_text.replace(' 3 ',' ')
full_text = full_text.replace(' 4b ',' ') # replace FIELD LABELS
full_text = full_text.replace('\n',' ')
else:
full_text = full_text.replace('\n',' ')
#### Call Smarty Streets API to find address from text
try:
conn = http.client.HTTPSConnection("us-extract.api.smartystreets.com")
payload = full_text #send full text
headers = {
'content-type': "text/plain",
'host': "us-extract.api.smartystreets.com",
'cache-control': "no-cache"
}
conn.request("POST", "/?auth-id=eff0b523-c528-0292-6685-6ad2c5a6e92a&auth-token=V7pWleHG8yLUS8CC7NqQ", payload, headers)
SSresp = conn.getresponse()
print('---->Call to SmartyStreets successful: ', SSresp)
except Exception as e:
print('###@@@@### Error occured while calling the SmartyStreets API for address extraction')
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
try:
SSresp = json.loads(SSresp.read())
print ('\n\n ---> Response from SmartyStreets', SSresp)
verified = SSresp['addresses'][0]['verified'] # address validity
if not verified : ## Checking if the address is valid
postal_address = {
"add_ln1":SSresp['addresses'][0]['text']
}
# when address is not valid we are just sending the identified address string in the line 1
print('Address on DL is invalid:', SSresp['addresses'][0]['text'] )
else:
#extract the address object
|
### END OF IF ELSE STRUCTURE
except Exception as e:
print(e)
print('###@@@@### Error occured while building address from SmartyStreets API response')
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
## make a continuous string without spaces by concatenating all individual texts from google
full_str = ''.join(full_text.split())
print('---->Fetching DLN; Address state is:', state)
# get DL number for IL
if state == 'IL':
# IL DLN is 14 digits - X999-999-999
DLN = re.search('\D\d{3}-\d{4}-\d{4}', full_str).group(0)
# get DL number for TX
if state == 'TX':
DLN = re.search('\d{6}9', full_str).group(0)
# get DL number for FL
if state == 'FL':
DLN = re.search('\D\d{3}-\d{3}-\d{2}-\d{3}-\d', full_str).group(0) # FL DLN is 17 digits
# get DL number for PA
if state == 'PA':
DLN = re.search('DLN\:\d{8}', full_str).group(0)[4:] # PA DLN is 8 digits
# get DL number for WI
if state == 'WI':
DLN = re.search('\D\d{3}-\d{4}-\d{4}-\d{2}', full_str).group(0) # WI DLN is 14 digits
# get DL number for CA
if state == 'CA':
DLN = re.search('\D\d{7}', full_str).group(0) # WI DLN is 8 digits
# get DL number for AL
if state == 'AL':
DLN = re.search('NO\.\d{7}', full_str).group(0)[3:] # WI DLN is 7 digits
print('----> License Number: ', DLN)
#### GET DOB and EXPIRY DATE
dtformat = True
DATES = re.findall('(\\d{1,2}/\\d{1,2}/\\d{4})', full_str) #date separator by slashes
if len(DATES) == 0:
dtformat = False
DATES = re.findall('(\d{1,2}-\d{1,2}-\d{4})', full_str) # date separator as -
if len(DATES) == 0: raise Exception('dates not found on drivers license')
#remove duplicates from the dates. there are duplicates because full_text for some reason contain two copies
imp_DATES = []
for t_date in DATES:
if t_date not in imp_DATES:
imp_DATES.append(t_date)
###
### TO CAPTURE Date of Birth and expiry date of the Driving license, SORT dates in scending order
### smallet date would be DOB and farthest date would be expiry date
###
import datetime
DLN_valid = True
if dtformat :
imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m/%d/%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m/%d/%Y")
DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## check if DL is still valid
else:
imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m-%d-%Y'))
EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], "%m-%d-%Y")
DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## Check if DL is not valid
DOB = imp_DATES[0] ## oldest date will be DOB
EXP = imp_DATES[-1] ## Latest date will be Expiry date of DL
print('----> DOB, EXPIRY: ', DOB, EXP)
ret_obj = {
"DLN": DLN,
"DLN_valid": DLN_valid,
"DL_State": state,
"DOB": DOB,
"EXP_DT": EXP,
"address": postal_address,
"verified":verified
}
# end of else - Verified address
return ret_obj
###
#### function to build the response for CHATFUEL JSON API
###
def build_resp(dlobj):
try:
# build the Full response dictionary
if dlobj['DLN_valid'] :
if dlobj['verified']:### build success message, display details and show quick reply buttons
print("Good driving license \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "YES",
"jsonAPIError": "NO",
"DLN" : dlobj['DLN'],
"DL_DOB" : dlobj['DOB'],
"DL_EXP":dlobj['EXP_DT'],
"DL_add_ln1": dlobj['address']['add_ln1'],
"DL_add_ln2": dlobj['address']['add_ln2'],
"DL_city": dlobj['address']['city'],
"DL_state": dlobj['address']['state'],
"DL_zip": dlobj['address']['zip']
},
"messages": [
{
"text": "We have scanned the drivers license you provided. Please confirm the below details"
},
{
"text": "DL Number: " + dlobj['DLN']
},
{
"text": "Date of Birth: " + dlobj['DOB']
},
{
"text": "DL Validity: " + dlobj['EXP_DT']
},
{
"text": "Address: " + dlobj['address']['add_ln1'] + ',\n' + dlobj['address']['add_ln2'] + ',\n' + dlobj['address']['city'] + ', ' + dlobj['address']['state'] + ' ' + dlobj['address']['zip']
}
]
}
else:
### Address could not be verified...
print("DL Address is not confirmed as valid \n")
resp_dict = {
"set_attributes": {
"validDL":"YES",
"validAddress" : "NO",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We could not validate the address. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
else:
### DL Expired
print("Driving license has expired!!! \n")
resp_dict = {
"set_attributes": {
"validDL":"NO",
"validAddress" : "NO" if not dlobj['verified'] else "YES",
"jsonAPIError": "NO"
},
"messages": [
{
"text": "Thanks for providing the DL image. "
},
{
"text": "We observed an issue with the document provided. I will let our representative contact you within 24 hours, to process your request appropriately."
}
]
}
except Exception as e:
print(e)
print('###@@@@### Error occured by building response dictionary object')
sentry.captureMessage(message=e, level=logging.FATAL)
resp_dict = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 104."}
]
}
return resp_dict;
##### END OF FUNCTION - build_resp
###################################################################
app = Flask(__name__)
#set sentry for logging the messages
sentry = Sentry(app, dsn='https://e8ddaf32cc924aa295b846f4947a9332:5e52d48fe13a4d2c82babe6833c5f871@sentry.io/273115')
CORS(app) ## cross origin resource whitelisting..
## dl ocr api on flask
@app.route('/dlocr_api', methods=['POST','GET'])
def get_DL():
"""API Call
Pandas dataframe (sent as a payload) from API Call
"""
#print("\n\n Started processing the GET request..\n")
##################
# REQUEST STRCUTRE
# imgurl
#################
try:
#req = request.json
img_path = request.args.get('imgurl', type= str)
print("##This is the request:", request.args , '\n\n')
#print("##This is the request JSON:", str(request.get_json()), '\n\n')
sentry.captureMessage(message='Started processing request- {}'.format(img_path), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL)
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the DL image details for your vehicle - 102."},
]
}
try:
#img_path = "DL Tests\illinois-DL.jpg"
# call google vision API
DL_Text = DL_OCR_VISION(img_path)
#parse to DL objects
dlobj = parse_DL(DL_Text)
print ('Parsed DL Info:', dlobj)
#build response structure
resp = build_resp(dlobj)
#resp = dlobj
#sentry.captureMessage(message='completed processing the DL OCR: {}'.format(dlobj['DLN']), level=logging.INFO)
except Exception as e:
print(e)
sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log
resp = {
"set_attributes": {
"jsonAPIError": "YES"
},
"messages": [
{"text": "An error occurred while fetching the details for your drivers license - 103."},
]
}
print ("--- Response -->", resp)
return jsonify(resp)
#### END OF function
# main function
if __name__ == '__main__':
## DISABLE CERITIFACATE VERIFICATION FOR SSL.. some issue in Capgemini network..
'''
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
'''
sentry.captureMessage('Started runnning API for DL COR !!')
#app.run(debug= True)
app.run(debug=True,port=5100) #turnoff debug for production deployment
| address = SSresp['addresses'][0]['api_output'][0]
## fomulate address
postal_address = {
"add_ln1": address['delivery_line_1'],
"add_ln2": '',
"city": address['components']['city_name'],
"state": address['components']['state_abbreviation'],
"zip": address['components']['zipcode'] + '-' + address['components']['plus4_code']
}
state = address['components']['state_abbreviation'] # get state code for all other work. | conditional_block |
LIST.py | #coding=utf-8
# ========================================
# 注意0:列表、字符串和元组都属于序列,但仅列表是可变对象
# 注意1:列表是一种可变(mutable)对象
# 注意2:列表方法,将直接改变列表结构
# 注意3:对列表的引用被修改,将直接改变原列表
# 注意4:在函数中引用列表,也将改变原列表
# 注意5:如果要禁止修改原列表,可在函数中生成对列表的完全拷贝
# 注意6:列表的插入和删除除非尾部元素时会涉及列表中大量元素的移动,效率较低
# 警告0:列表的可变性可能造成意想不到的bug!
# ========================================
# 注意:列表的元素可以是包含列表在内的任何类型数据
# 举例:
# [1, 2, 3, 4, 5]
# ['Tom', 'Joy', 'Lily', 'Harry']
# [5, 3.2, 'hello', ('a', 2, 3), [5, 6.2, 'world']]
# ["a", 3, "c", 1, "b", 2, "d", 4, "e"]
# ========================================
# 列表的索引和切片操作
# 注意:索引操作返回非独立于列表的元素
# 注意:切片操作返回独立于列表的元素拷贝列表
# 列表的索引
def list_index():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 列表的每个元素都有一个序号,叫索引值,它标志元素的位置
# 列表有两套标记索引值的方法,一套正着数,一套倒着数
# 元素的索引值正着数(从左到右)依次为0, 1, 2, 3,...
# 元素的索引值倒着数(从右到左)依次为-1, -2, -3,...
# 获取列表的元素,要用索引操作
# 例如,要获取元素的第一个值,可以用正索引值:
print(L[0])
# 也可以用倒索引值:
print(L[-5])
# 而要获取元素的最后一个值,可以用正索引值:
print(L[4])
# 也可以用倒索引值:
print(L[-1])
# 注意:用不存在的索引值进行索引操作将引发异常IndexError
# print(L[7])
# 注意:索引操作,返回列表元素的非独立拷贝!与切片不同!
# 例如,以下结果返回True:
print(L[-1] is L[-1])
print(id(L[-1]) == id(L[-1]))
# 列表的切片
def list_slice():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 获取列表的多个元素,可以使用列表的切片操作
# 切片操作有多种形式,如:
# 获取索引值在0~4(不含)区间的元素
print(L[0:4])
# 当起止切片的索引值在开头或结尾时,也可省略起止索引值
print(L[:4])
# 或者,用倒索引值也可以
print(L[-7:-3])
# 同样,使用倒索引也可以省略起止索引值
print(L[:-3])
# 甚至,还可以混合使用正索引值和倒索引值
print(L[0:-3])
# 或
print(L[-7:4])
# 注意:与索引操作不同,切片操作对不存在的索引值比较宽容
# 例如,获取索引值在0~4(不含)区间的元素,还可以用
print(L[-100:4])
# 因为会使写代码意图不明确,因此并不推荐如上用法
# --------------------
# 获取列表的完全拷贝,可以用
print(L[0:])
# 也可以利用切片操作对不存在的索引值宽容的特性写成
print(L[0:7])
# 或者,直接用省略起止索引值的办法(推荐)进行操作
print(L[:])
# --------------------
# 若需要获取列表中具有固定间隔的元素时,可以增加第三个数字
# 这种切片操作叫“步进式切片操作”
# 例如,每两个元素获取一个元素
print(L[0:7:2])
# 也可以用省略起止索引值的办法(推荐)进行操作
print(L[::2])
# --------------------
# 当第三个参数为负数时,将会进行逆序切片操作
# 例如,获取原列表的完整逆序拷贝
print(L[::-1])
# --------------------
# 这样,获取原列表的方法其实还有不传入第三个参数的办法
print(L[::])
# --------------------
# 注意:与索引操作不同,所有切片操作获得列表元素的浅拷贝
# 例如,以下结果返回False
print(L[0:1] is L[0:1])
print(id(L[0:1]) == id(L[0:1]))
if __name__ == '__main__':
list_index()
list_slice()
pass
#__import__("sys").exit()
# ========================================
# 列表方法
def list_method():
# 列表的方法,用于实现对列表元素的排序,以及增、删、改、查
# 列表的排序方法:sort(对混合列表无效)
# 注意:sort方法在原内存地址修改列表
# 注意:sort不能对['b', 1, 'a', 2]这样的混合列表排序
L1 = [1,3,2]
# 注意:修改操作在原内存地址进行,返回值为None
L1.sort()
print(L1)
L2 = ['c', 'a', 'b']
L2.sort()
print(L2)
# --------------------
# 列表的反转:reverse
# 注意:效果与切片操作list[::-1]相同,但是切片生成一个拷贝。
L = [1,3,2]
L.reverse()
print(L)
mixed_L = ["a", 3, "c", 1, "b", 2]
mixed_L.reverse()
#__import__("sys").exit()
# 插入list.insert():指定索引之前插入元素,其它元素后移。
# 注意:list[n] = "something"替换索引元素。
L = ["c","a","b"]
L.insert(2,"k")
print(L)
# 扩展list1.extend(list2)。改变原列表。
# 注意:合并操作list1 + list2生成一个拷贝。
L = [1,2]
L.extend(["a", "b"])
print(L)
# 追加list.append(item):在列表末尾增加元素。改变原列表。
L = [1,2]
L.append("abc")
print(L)
# 统计list.count(item):返回某个元素在列表中出现的频率。
# 注意:想一次统计多个元素,可参考模块collections。
L = ["a","b","c","c","d","c"]
print(L.count("c"))
# 索引list.index(item):返回列表中首次出现的索引。
#注意:对不存在的元素索引将引发ValueError。
L = ["a","b","c","c","d","c"]
print(L.index("c"))
# 列表值弹出list.pop(index):弹出指定索引元素并返回弹出元素。
# 注意:默认弹出最后一个元素。可用pop(3)弹出索引为3的元素。
# 注意:对于字典,pop(key)指弹出指定的键所对应的值。
L = ["a","b","d","c"]
print(L.pop(2))
print(L)
# 元素删除list.remove(item):移除某个值的首个匹配项。
L = ["a","b","c"]
L.remove("c")
print(L)
#同时遍历列表的索引和值:enumerate(list)
print(["%s:%s"%(index,item) for index,item in enumerate(["c", 'a', 'b'])])
# ========================================
print("\n\n"+"="*50)
print("列表解析:")
#列表解析:在Python2中返回列表,在Python3中返回生成器。
# 注意:在Python3中,可用list()收集元素到列表中。
#结构:
"""
[expression for target1 [if condition1]
for target2 [if condition2]
for target3 [if condition3]
...
for targetN [if conditionN]]
"""
# 注意:一个for语句后仅可使用一个if测试。
# ========================================
# 嵌套列表解析:
print([p+q for p in "defg" for q in "abc"])
# 生成具有某种特征的列表。
print([x % 2 == 0 for x in range(1, 11)])
# 使用None生成列表。
print([None]*10)
# 带有条件表达式的列表解析。
print([x for x in range(10) if x % 2 == 0])
# 获取指定值。
L = [("bob",35,"mgr"),("mel",40,"dev")]
print([name for (name,age,job) in L]) # 取出姓名。
# ----------------------------------------
# 用列表解析获取嵌套列表的值。
L = [ [1,3,5,7], [2,4,6,3], [3,1,2,8], [6,4,0,7] ]
print([row[2] for row in L]) # 获取第三列。
print([L[row][2] for row in [0,1,2,3]]) # 获取第三列。
print([row[2] for row in L if row[2]%2 == 0]) #第三列的偶数。
print([L[i][i] for i in [0,1,2,3]])# 获取对角元素值。
print([sum(row) for row in L]) # 求每行元素的和。
M = [[1,2,3],[4,5,6],[7,8,9]]
N = [[2,2,2],[3,3,3],[4,4,4]]
print([M[row][i]*N[row][i] for row in range(3) for i in range(3)])
print("M与N对应元素乘积:%s" % [[M[row][i]*N[row][i] for row in range(3)] for i in range(3)])
# ----------------------------------------
print( "\n\n" + "-"*40 )
print( "用列表解析除去文件中每行末尾的换行符:" )
#[line.rstrip() for line in open(filename).readlines()]
#[line.rstrip() for line in open(filename)]
#注意:后一种方法使用了文件迭代器。《Python学习手册4》
# ========================================
# ----------------------------------------
# 集合解析:{f(x) for x in S if P(x)} # 仅Python3可用。
# 类似生成器表达式set(f(x)for x in S if P(x))
print({row[0] for row in L}) # 集合解析。
# 字典解析:{key:val for (key,val) in zip(keys_list,vals_list)} # 仅Python3可用。
# 类似dict(zip(keys,vals))。
# 类似生成器表达式dict((x,f(x))for x in items)
print({i:ord(i) for i in "abcdef"}) # 字典解析。
# ========================================
print("\n\n"+"="*50)
print("列表的多重嵌套:" )
# 列表的多重嵌套。
print("展平(展开、合并)二重列表:")
# 展平(展开、合并)二重列表
# 仅含数字的列表
# 注意:这样的不行:[[1,2], [3], [4,5,6], [7,8,9,0], 11]
L = [[1,2], [3], [4,5,6], [7,8,9,0]]
print(sum(L, []))
import itertools # 用来展开一般嵌套列表
# 利用itertools库展平嵌套列表
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
import itertools
data = [[1, 2, 'b'], ['a', 5, 6]]
print(list(itertools.chain.from_iterable(data)))
# 再或者
from functools import reduce
from operator import add
data = [[1, 2, 'b'], ['a', 5, 6]]
print(reduce(add, data))
# 仅含字符串的列表
# 注意:这样的不行:[["a","b","c"],["d","e"],["f"],"g"]
L = [["a","b","c"], ["d","e"], ["f"]]
print(sum(L, []))
# 利用itertools
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
# 混合列表
L = [["a","b","c"], [1,2,"d","e"], [3,4]]
print(sum(L, []))
# 利用itertools
# 注意:itertools.chain()对下面的多重嵌套列表无效
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("展开多重嵌套列表:")
# 展开多重列表
L = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
# 递归遍历。
def flattenList(L,new_list=[]):
# 展平多重列表
for i in L:
if not isinstance(i,list):
new_list.append(i)
else:
flattenList(i,new_list)
return new_list
print(flattenList(L))
# 方法三:用递归中的奇技淫巧。
func = lambda L: sum(map(func,L),[]) if isinstance(L,list) else [L]
new_str = func(L)
print(new_str)
s = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
flat=lambda L: sum(map(flat,L),[]) if isinstance(L,list) else [L]
print(flat(s))
flatten = lambda x: [y for l in x for y in flatten(l)] if type(x) is list else [x]
print(flatten(s))
LIST = [1, "a", ["b", 2, [3, "c", 4], "d", 5], 6, "e", [7, "f", 8], 9, "g", 0]
print(flatten(LIST))
# 将以下递归函数写到一个类中:
def printList(L, newList=list()):
# 展平多元列表
for x in L:
if isinstance(x, list):
printList(x,newList)
else:
newList.append((x))
return newList
print(printList(LIST))
# 写到类中:
class PrintList(object):
def printList(self, L, newList=list()):
self.L = L
for x in L:
if isinstance(x, list):
self.printList(x,newList)
else:
newList.append((x))
return newList
a = PrintList()
print(a.printList(LIST))
#----------------------------------------------
# 类似列表解析的例子:
# 例子:
S = set()
for i in [1,2,3,4]:
S.add(i)
print(S)
# 相当于:
S = {i for i in [1,2,3,4]}
print(S)
# ======================
# 例子:
D = dict()
for key, value in [('a',1), ('b',2)]:
D[key] = value
print(D)
# 相当于:
D = {key:value for key,value in [('a',1), ('b',2)]}
print(D)
#==============================================
print("\n\n"+"="*50)
print("使用map函数:")
#map(func,iterable1,iterable2,...)函数的使用。
#传入单参数函数:map(lambda x: x+1, num_list)
#传入多参数函数:map(lambda x,y: x+y, numlist1, numlist2)
# 注意:map()函数的第一个参数是函数
# 对应元素想加:
print(list(map(lambda x,y: x+y, [1,2,3], [4,5,6])))
#print(list(map(sum, list_in_list)))
print(list(map(abs,[-1,3,-5,4,2])))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用reduce函数:")
# reduce(func,sequence) # 仅Python2,Python3移在functools模块
#reduce(functhon,sequence[,initializer])函数的使用。
#注意:在Python3中,该函数被移至functools。
#用reduce计算出所有数字的和:
import functools
print(functools.reduce(lambda x,y:x+y, [1,2,3]))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用filter函数:")
# filter(func, iterable) # 过滤器。返回iterable中所有使func的返回值为真的元素。注意:若func是None,则返回值等价于True的元素。
#filter(func,sequence)过滤器函数。
# 返回iterable中所有使function返回值为真的元素组成的列表。
seasons = ["Spring||","|","Summer||","|","Fall||","Winter||"]
print(list(filter(lambda x:x!="|",seasons)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用zip函数:")
#zip()函数的使用。
#zip(list1,list2)
#用zip将先压缩,再还原。
L_zip = [(2,11),(4,13),(6,15),(8,17)]
print(list(zip(*L_zip)))
L_zip2 = [(2, 4, 6, 8), (11, 13, 15, 17)]
print(list(zip(*L_zip2)))
#将字典的键和值颠倒:一旦值有重复,将丢弃多余的。
D = {1:"a",2:"c",3:"c"}
print(dict(zip(D.values(),D.keys())))
#==============================================
print("\n\n"+"="*50)
# 这里两个结果并不一样,具体原因我暂时也不知道
def list_feature01():
def func1():
a = ['a', 'b', 'c']
m = a[1:2]
n = a[1:2]
print(m is n)
print(id(m) == id(n))
def func2():
a = ['a', 'b', 'c']
print(a[1:2] is a[1:2])
print(id(a[1:2]) == id(a[1:2]))
func1()
func2()
| identifier_name | ||
LIST.py | #coding=utf-8
# ========================================
# 注意0:列表、字符串和元组都属于序列,但仅列表是可变对象
# 注意1:列表是一种可变(mutable)对象
# 注意2:列表方法,将直接改变列表结构
# 注意3:对列表的引用被修改,将直接改变原列表
# 注意4:在函数中引用列表,也将改变原列表
# 注意5:如果要禁止修改原列表,可在函数中生成对列表的完全拷贝
# 注意6:列表的插入和删除除非尾部元素时会涉及列表中大量元素的移动,效率较低
# 警告0:列表的可变性可能造成意想不到的bug!
# ========================================
# 注意:列表的元素可以是包含列表在内的任何类型数据
# 举例:
# [1, 2, 3, 4, 5]
# ['Tom', 'Joy', 'Lily', 'Harry']
# [5, 3.2, 'hello', ('a', 2, 3), [5, 6.2, 'world']]
# ["a", 3, "c", 1, "b", 2, "d", 4, "e"]
# ========================================
# 列表的索引和切片操作
# 注意:索引操作返回非独立于列表的元素
# 注意:切片操作返回独立于列表的元素拷贝列表
# 列表的索引
def list_index():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 列表的每个元素都有一个序号,叫索引值,它标志元素的位置
# 列表有两套标记索引值的方法,一套正着数,一套倒着数
# 元素的索引值正着数(从左到右)依次为0, 1, 2, 3,...
# 元素的索引值倒着数(从右到左)依次为-1, -2, -3,...
# 获取列表的元素,要用索引操作
# 例如,要获取元素的第一个值,可以用正索引值:
print(L[0])
# 也可以用倒索引值:
print(L[-5])
# 而要获取元素的最后一个值,可以用正索引值:
print(L[4])
# 也可以用倒索引值:
print(L[-1])
# 注意:用不存在的索引值进行索引操作将引发异常IndexError
# print(L[7])
# 注意:索引操作,返回列表元素的非独立拷贝!与切片不同!
# 例如,以下结果返回True:
print(L[-1] is L[-1])
print(id(L[-1]) == id(L[-1]))
# 列表的切片
def list_slice():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 获取列表的多个元素,可以使用列表的切片操作
# 切片操作有多种形式,如:
# 获取索引值在0~4(不含)区间的元素
print(L[0:4])
# 当起止切片的索引值在开头或结尾时,也可省略起止索引值
print(L[:4])
# 或者,用倒索引值也可以
print(L[-7:-3])
# 同样,使用倒索引也可以省略起止索引值
print(L[:-3])
# 甚至,还可以混合使用正索引值和倒索引值
print(L[0:-3])
# 或
print(L[-7:4])
# 注意:与索引操作不同,切片操作对不存在的索引值比较宽容
# 例如,获取索引值在0~4(不含)区间的元素,还可以用
print(L[-100:4])
# 因为会使写代码意图不明确,因此并不推荐如上用法
# --------------------
# 获取列表的完全拷贝,可以用
print(L[0:])
# 也可以利用切片操作对不存在的索引值宽容的特性写成
print(L[0:7])
# 或者,直接用省略起止索引值的办法(推荐)进行操作
print(L[:])
# --------------------
# 若需要获取列表中具有固定间隔的元素时,可以增加第三个数字
# 这种切片操作叫“步进式切片操作”
# 例如,每两个元素获取一个元素
print(L[0:7:2])
# 也可以用省略起止索引值的办法(推荐)进行操作
print(L[::2])
# --------------------
# 当第三个参数为负数时,将会进行逆序切片操作
# 例如,获取原列表的完整逆序拷贝
print(L[::-1])
# --------------------
# 这样,获取原列表的方法其实还有不传入第三个参数的办法
print(L[::])
# --------------------
# 注意:与索引操作不同,所有切片操作获得列表元素的浅拷贝
# 例如,以下结果返回False
print(L[0:1] is L[0:1])
print(id(L[0:1]) == id(L[0:1]))
if __name__ == '__main__':
list_index()
list_slice()
pass
#__import__("sys").exit()
# ========================================
# 列表方法
def list_method():
# 列表的方法,用于实现对列表元素的排序,以及增、删、改、查
# 列表的排序方法:sort(对混合列表无效)
# 注意:sort方法在原内存地址修改列表
# 注意:sort不能对['b', 1, 'a', 2]这样的混合列表排序
L1 = [1,3,2]
# 注意:修改操作在原内存地址进行,返回值为None
L1.sort()
print(L1)
L2 = ['c', 'a', 'b']
L2.sort()
print(L2)
# --------------------
# 列表的反转:reverse
# 注意:效果与切片操作list[::-1]相同,但是切片生成一个拷贝。
L = [1,3,2]
L.reverse()
print(L)
mixed_L = ["a", 3, "c", 1, "b", 2]
mixed_L.reverse()
#__import__("sys").exit()
# 插入list.insert():指定索引之前插入元素,其它元素后移。
# 注意:list[n] = "something"替换索引元素。
L = ["c","a","b"]
L.insert(2,"k")
print(L)
# 扩展list1.extend(list2)。改变原列表。
# 注意:合并操作list1 + list2生成一个拷贝。
L = [1,2]
L.extend(["a", "b"])
print(L)
# 追加list.append(item):在列表末尾增加元素。改变原列表。
L = [1,2]
L.append("abc")
print(L)
# 统计list.count(item):返回某个元素在列表中出现的频率。
# 注意:想一次统计多个元素,可参考模块collections。
L = ["a","b","c","c","d","c"]
print(L.count("c"))
# 索引list.index(item):返回列表中首次出现的索引。
#注意:对不存在的元素索引将引发ValueError。
L = ["a","b","c","c","d","c"]
print(L.index("c"))
# 列表值弹出list.pop(index):弹出指定索引元素并返回弹出元素。
# 注意:默认弹出最后一个元素。可用pop(3)弹出索引为3的元素。
# 注意:对于字典,pop(key)指弹出指定的键所对应的值。
L = ["a","b","d","c"]
print(L.pop(2))
print(L)
# 元素删除list.remove(item):移除某个值的首个匹配项。
L = ["a","b","c"]
L.remove("c")
print(L)
#同时遍历列表的索引和值:enumerate(list)
print(["%s:%s"%(index,item) for index,item in enumerate(["c", 'a', 'b'])])
# ========================================
print("\n\n"+"="*50)
print("列表解析:")
#列表解析:在Python2中返回列表,在Python3中返回生成器。
# 注意:在Python3中,可用list()收集元素到列表中。
#结构:
"""
[expression for target1 [if condition1]
for target2 [if condition2]
for target3 [if condition3]
...
for targetN [if conditionN]]
"""
# 注意:一个for语句后仅可使用一个if测试。
# ========================================
# 嵌套列表解析:
print([p+q for p in "defg" for q in "abc"])
# 生成具有某种特征的列表。
print([x % 2 == 0 for x in range(1, 11)])
# 使用None生成列表。
print([None]*10)
# 带有条件表达式的列表解析。
print([x for x in range(10) if x % 2 == 0])
# 获取指定值。
L = [("bob",35,"mgr"),("mel",40,"dev")]
print([name for (name,age,job) in L]) # 取出姓名。
# ----------------------------------------
# 用列表解析获取嵌套列表的值。
L = [ [1,3,5,7], [2,4,6,3], [3,1,2,8], [6,4,0,7] ]
print([row[2] for row in L]) # 获取第三列。
print([L[row][2] for row in [0,1,2,3]]) # 获取第三列。
print([row[2] for row in L if row[2]%2 == 0]) #第三列的偶数。
print([L[i][i] for i in [0,1,2,3]])# 获取对角元素值。
print([sum(row) for row in L]) # 求每行元素的和。
M = [[1,2,3],[4,5,6],[7,8,9]]
N = [[2,2,2],[3,3,3],[4,4,4]]
print([M[row][i]*N[row][i] for row in range(3) for i in range(3)])
print("M与N对应元素乘积:%s" % [[M[row][i]*N[row][i] for row in range(3)] for i in range(3)])
# ----------------------------------------
print( "\n\n" + "-"*40 )
print( "用列表解析除去文件中每行末尾的换行符:" )
#[line.rstrip() for line in open(filename).readlines()]
#[line.rstrip() for line in open(filename)]
#注意:后一种方法使用了文件迭代器。《Python学习手册4》
# ========================================
# ----------------------------------------
# 集合解析:{f(x) for x in S if P(x)} # 仅Python3可用。
# 类似生成器表达式set(f(x)for x in S if P(x))
print({row[0] for row in L}) # 集合解析。
# 字典解析:{key:val for (key,val) in zip(keys_list,vals_list)} # 仅Python3可用。
# 类似dict(zip(keys,vals))。
# 类似生成器表达式dict((x,f(x))for x in items)
print({i:ord(i) for i in "abcdef"}) # 字典解析。
# ========================================
print("\n\n"+"="*50)
print("列表的多重嵌套:" )
# 列表的多重嵌套。
print("展平(展开、合并)二重列表:")
# 展平(展开、合并)二重列表
# 仅含数字的列表
# 注意:这样的不行:[[1,2], [3], [4,5,6], [7,8,9,0], 11]
L = [[1,2], [3], [4,5,6], [7,8,9,0]]
print(sum(L, []))
import itertools # 用来展开一般嵌套列表
# 利用itertools库展平嵌套列表
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
import itertools
data = [[1, 2, 'b'], ['a', 5, 6]]
print(list(itertools.chain.from_iterable(data)))
# 再或者
from functools import reduce
from operator import add
data = [[1, 2, 'b'], ['a', 5, 6]]
print(reduce(add, data))
# 仅含字符串的列表
# 注意:这样的不行:[["a","b","c"],["d","e"],["f"],"g"]
L = [["a","b","c"], ["d","e"], ["f"]]
print(sum(L, []))
# 利用itertools
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
# 混合列表
L = [["a","b","c"], [1,2,"d","e"], [3,4]]
print(sum(L, []))
# 利用itertools
# 注意:itertools.chain()对下面的多重嵌套列表无效
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("展开多重嵌套列表:")
# 展开多重列表
L = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
# 递归遍历。
def flattenList(L,new_list=[]):
# 展平多重列表
for i in L:
if not isinstance(i,list):
new_list.append(i)
else:
flattenList(i,new_list)
return new_list
print(flattenList(L))
# 方法三:用递归中的奇技淫巧。
func = lambda L: sum(map(func,L),[]) if isinstance(L,list) else [L]
new_str = func(L)
print(new_str)
s = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
flat=lambda L: sum(map(flat,L),[]) if isinstance(L,list) else [L]
print(flat(s))
flatten = lambda x: [y for l in x for y in flatten(l)] if type(x) is list else [x]
print(flatten(s))
LIST = [1, "a", ["b", 2, [3, "c", 4], "d", 5], 6, "e", [7, "f", 8], 9, "g", 0]
print(flatten(LIST))
# 将以下递归函数写到一个类中:
def printList(L, newList=list()):
# 展平多元列表
for x in L:
if isinstance(x, list):
printList(x,newList)
else:
newList.append((x))
return newList
print(printList(LIST))
# 写到类中:
class PrintList(object):
def printList(self, L, newList=list()):
self.L = L
for x in L:
if isinstance(x, list):
self.printList(x,newList)
else:
newList.append((x))
return newList
a = PrintList()
print(a.printList(LIST))
#----------------------------------------------
# 类似列表解析的例子:
# 例子:
S = set()
for i in [1,2,3,4]:
S.add(i)
print(S)
# 相当于:
S = {i for i in [1,2,3,4]}
print(S)
# ======================
# 例子:
D = dict()
for key, value in [('a',1), ('b',2)]:
D[key] = value
print(D)
# 相当于:
D = {key:value for key,value in [('a',1), ('b',2)]}
print(D)
#==============================================
print("\n\n"+"="*50)
print("使用map函数:")
#map(func,iterable1,iterable2,...)函数的使用。
#传入单参数函数:map(lambda x: x+1, num_list)
#传入多参数函数:map(lambda x,y: x+y, numlist1, numlist2)
# 注意:map()函数的第一个参数是函数
# 对应元素想加:
print(list(map(lambda x,y: x+y, [1,2,3], [4,5,6])))
#print(list(map(sum, list_in_list)))
print(list(map(abs,[-1,3,-5,4,2])))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用reduce函数:")
# reduce(func,sequence) # 仅Python2,Python3移在functools模块
#reduce(functhon,sequence[,initializer])函数的使用。
#注意:在Python3中,该函数被移至functools。
#用reduce计算出所有数字的和:
import functools
print(functools.reduce(lambda x,y:x+y, [1,2,3]))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用filter函数:")
# filter(func, iterable) # 过滤器。返回iterable中所有使func的返回值为真的元素。注意:若func是None,则返回值等价于True的元素。
#filter(func,sequence)过滤器函数。
# 返回iterable中所有使function返回值为真的元素组成的列表。
seasons = ["Spring||","|","Summer||","|","Fall||","Winter||"]
print(list(filter(lambda x:x!="|",seasons)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用zip函数:")
#zip()函数的使用。
#zip(list1,list2)
#用zip将先压缩,再还原。
L_zip = [(2,11),(4,13),(6,15),(8,17)]
print(list(zip(*L_zip)))
L_zip2 = [(2, 4, 6, 8), (11, 13, 15, 17)]
print(list(zip(*L_zip2)))
#将字典的键和值颠倒:一旦值有重复,将丢弃多余的。
D = {1:"a",2:"c",3:"c"}
print(dict(zip(D.values(),D.keys())))
#==============================================
print("\n\n"+"="*50)
# 这里两个结果并不一样,具体原因我暂时也不知道
def list_feature01():
def func1():
a = ['a', 'b', 'c']
m = a[1:2]
n = a[1:2]
print(m is n)
print(id(m) == id(n))
def func2():
a = ['a', 'b', 'c']
print(a[1:2] is a[1:2])
print(id(a[1:2]) == id(a[1:2]))
func1()
func2()
| identifier_body | ||
LIST.py | #coding=utf-8
# ========================================
# 注意0:列表、字符串和元组都属于序列,但仅列表是可变对象
# 注意1:列表是一种可变(mutable)对象
# 注意2:列表方法,将直接改变列表结构
# 注意3:对列表的引用被修改,将直接改变原列表
# 注意4:在函数中引用列表,也将改变原列表
# 注意5:如果要禁止修改原列表,可在函数中生成对列表的完全拷贝
# 注意6:列表的插入和删除除非尾部元素时会涉及列表中大量元素的移动,效率较低
# 警告0:列表的可变性可能造成意想不到的bug!
# ========================================
# 注意:列表的元素可以是包含列表在内的任何类型数据
# 举例:
# [1, 2, 3, 4, 5]
# ['Tom', 'Joy', 'Lily', 'Harry']
# [5, 3.2, 'hello', ('a', 2, 3), [5, 6.2, 'world']]
# ["a", 3, "c", 1, "b", 2, "d", 4, "e"]
# ========================================
# 列表的索引和切片操作
# 注意:索引操作返回非独立于列表的元素
# 注意:切片操作返回独立于列表的元素拷贝列表
# 列表的索引
def list_index():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 列表的每个元素都有一个序号,叫索引值,它标志元素的位置
# 列表有两套标记索引值的方法,一套正着数,一套倒着数
# 元素的索引值正着数(从左到右)依次为0, 1, 2, 3,...
# 元素的索引值倒着数(从右到左)依次为-1, -2, -3,...
# 获取列表的元素,要用索引操作
# 例如,要获取元素的第一个值,可以用正索引值:
print(L[0])
# 也可以用倒索引值:
print(L[-5])
# 而要获取元素的最后一个值,可以用正索引值:
print(L[4])
# 也可以用倒索引值:
print(L[-1])
# 注意:用不存在的索引值进行索引操作将引发异常IndexError
# print(L[7])
# 注意:索引操作,返回列表元素的非独立拷贝!与切片不同!
# 例如,以下结果返回True:
print(L[-1] is L[-1])
print(id(L[-1]) == id(L[-1]))
# 列表的切片
def list_slice():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 获取列表的多个元素,可以使用列表的切片操作
# 切片操作有多种形式,如:
# 获取索引值在0~4(不含)区间的元素
print(L[0:4])
# 当起止切片的索引值在开头或结尾时,也可省略起止索引值
print(L[:4])
# 或者,用倒索引值也可以
print(L[-7:-3])
# 同样,使用倒索引也可以省略起止索引值
print(L[:-3])
# 甚至,还可以混合使用正索引值和倒索引值
print(L[0:-3])
# 或
print(L[-7:4])
# 注意:与索引操作不同,切片操作对不存在的索引值比较宽容
# 例如,获取索引值在0~4(不含)区间的元素,还可以用
print(L[-100:4])
# 因为会使写代码意图不明确,因此并不推荐如上用法
# --------------------
# 获取列表的完全拷贝,可以用
print(L[0:])
# 也可以利用切片操作对不存在的索引值宽容的特性写成
print(L[0:7])
# 或者,直接用省略起止索引值的办法(推荐)进行操作
print(L[:])
# --------------------
# 若需要获取列表中具有固定间隔的元素时,可以增加第三个数字
# 这种切片操作叫“步进式切片操作”
# 例如,每两个元素获取一个元素
print(L[0:7:2])
# 也可以用省略起止索引值的办法(推荐)进行操作
print(L[::2])
# --------------------
# 当第三个参数为负数时,将会进行逆序切片操作
# 例如,获取原列表的完整逆序拷贝
print(L[::-1])
# --------------------
# 这样,获取原列表的方法其实还有不传入第三个参数的办法
print(L[::])
# --------------------
# 注意:与索引操作不同,所有切片操作获得列表元素的浅拷贝
# 例如,以下结果返回False
print(L[0:1] is L[0:1])
print(id(L[0:1]) == id(L[0:1]))
if __name__ == '__main__':
list_index()
list_slice()
pass
#__import__("sys").exit()
# ========================================
# 列表方法
def list_method():
# 列表的方法,用于实现对列表元素的排序,以及增、删、改、查
# 列表的排序方法:sort(对混合列表无效)
# 注意:sort方法在原内存地址修改列表
# 注意:sort不能对['b', 1, 'a', 2]这样的混合列表排序
L1 = [1,3,2]
# 注意:修改操作在原内存地址进行,返回值为None
L1.sort()
print(L1)
L2 = ['c', 'a', 'b']
L2.sort()
print(L2)
# --------------------
# 列表的反转:reverse
# 注意:效果与切片操作list[::-1]相同,但是切片生成一个拷贝。
L = [1,3,2]
L.reverse()
print(L)
mixed_L = ["a", 3, "c", 1, "b", 2]
mixed_L.reverse()
#__import__("sys").exit()
# 插入list.insert():指定索引之前插入元素,其它元素后移。
# 注意:list[n] = "something"替换索引元素。
L = ["c","a","b"]
L.insert(2,"k")
print(L)
# 扩展list1.extend(list2)。改变原列表。
# 注意:合并操作list1 + list2生成一个拷贝。
L = [1,2]
L.extend(["a", "b"])
print(L)
# 追加list.append(item):在列表末尾增加元素。改变原列表。
L = [1,2]
L.append("abc")
print(L)
# 统计list.count(item):返回某个元素在列表中出现的频率。
# 注意:想一次统计多个元素,可参考模块collections。
L = ["a","b","c","c","d","c"]
print(L.count("c"))
# 索引list.index(item):返回列表中首次出现的索引。
#注意:对不存在的元素索引将引发ValueError。
L = ["a","b","c","c","d","c"]
print(L.index("c"))
# 列表值弹出list.pop(index):弹出指定索引元素并返回弹出元素。
# 注意:默认弹出最后一个元素。可用pop(3)弹出索引为3的元素。
# 注意:对于字典,pop(key)指弹出指定的键所对应的值。
L = ["a","b","d","c"]
print(L.pop(2))
print(L)
# 元素删除list.remove(item):移除某个值的首个匹配项。
L = ["a","b","c"]
L.remove("c")
print(L)
#同时遍历列表的索引和值:enumerate(list)
print(["%s:%s"%(index,item) for index,item in enumerate(["c", 'a', 'b'])])
# ========================================
print("\n\n"+"="*50)
print("列表解析:")
#列表解析:在Python2中返回列表,在Python3中返回生成器。
# 注意:在Python3中,可用list()收集元素到列表中。
#结构:
"""
[expression for target1 [if condition1]
for target2 [if condition2]
for target3 [if condition3]
...
for targetN [if conditionN]]
"""
# 注意:一个for语句后仅可使用一个if测试。
# ========================================
# 嵌套列表解析:
print([p+q for p in "defg" for q in "abc"])
# 生成具有某种特征的列表。
print([x % 2 == 0 for x in range(1, 11)])
# 使用None生成列表。
print([None]*10)
# 带有条件表达式的列表解析。
print([x for x in range(10) if x % 2 == 0])
# 获取指定值。
L = [("bob",35,"mgr"),("mel",40,"dev")]
print([name for (name,age,job) in L]) # 取出姓名。
# ----------------------------------------
# 用列表解析获取嵌套列表的值。
L = [ [1,3,5,7], [2,4,6,3], [3,1,2,8], [6,4,0,7] ]
print([row[2] for row in L]) # 获取第三列。
print([L[row][2] for row in [0,1,2,3]]) # 获取第三列。
print([row[2] for row in L if row[2]%2 == 0]) #第三列的偶数。
print([L[i][i] for i in [0,1,2,3]])# 获取对角元素值。
print([sum(row) for row in L]) # 求每行元素的和。
M = [[1,2,3],[4,5,6],[7,8,9]]
N = [[2,2,2],[3,3,3],[4,4,4]]
print([M[row][i]*N[row][i] for row in range(3) for i in range(3)])
print("M与N对应元素乘积:%s" % [[M[row][i]*N[row][i] for row in range(3)] for i in range(3)])
# ----------------------------------------
print( "\n\n" + "-"*40 )
print( "用列表解析除去文件中每行末尾的换行符:" )
#[line.rstrip() for line in open(filename).readlines()]
#[line.rstrip() for line in open(filename)]
#注意:后一种方法使用了文件迭代器。《Python学习手册4》
# ========================================
# ----------------------------------------
# 集合解析:{f(x) for x in S if P(x)} # 仅Python3可用。
# 类似生成器表达式set(f(x)for x in S if P(x))
print({row[0] for row in L}) # 集合解析。
# 字典解析:{key:val for (key,val) in zip(keys_list,vals_list)} # 仅Python3可用。
# 类似dict(zip(keys,vals))。
# 类似生成器表达式dict((x,f(x))for x in items)
print({i:ord(i) for i in "abcdef"}) # 字典解析。
# ========================================
print("\n\n"+"="*50)
print("列表的多重嵌套:" )
# 列表的多重嵌套。
print("展平(展开、合并)二重列表:")
# 展平(展开、合并)二重列表
# 仅含数字的列表
# 注意:这样的不行:[[1,2], [3], [4,5,6], [7,8,9,0], 11]
L = [[1,2], [3], [4,5,6], [7,8,9,0]]
print(sum(L, []))
import itertools # 用来展开一般嵌套列表
# 利用itertools库展平嵌套列表
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
import itertools
data = [[1, 2, 'b'], ['a', 5, 6]]
print(list(itertools.chain.from_iterable(data)))
# 再或者
from functools import reduce
from operator import add
data = [[1, 2, 'b'], ['a', 5, 6]]
print(reduce(add, data))
# 仅含字符串的列表
# 注意:这样的不行:[["a","b","c"],["d","e"],["f"],"g"]
L = [["a","b","c"], ["d","e"], ["f"]]
print(sum(L, []))
# 利用itertools
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
# 混合列表
L = [["a","b","c"], [1,2,"d","e"], [3,4]]
print(sum(L, []))
# 利用itertools
# 注意:itertools.chain()对下面的多重嵌套列表无效
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("展开多重嵌套列表:")
# 展开多重列表 | for i in L:
if not isinstance(i,list):
new_list.append(i)
else:
flattenList(i,new_list)
return new_list
print(flattenList(L))
# 方法三:用递归中的奇技淫巧。
func = lambda L: sum(map(func,L),[]) if isinstance(L,list) else [L]
new_str = func(L)
print(new_str)
s = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
flat=lambda L: sum(map(flat,L),[]) if isinstance(L,list) else [L]
print(flat(s))
flatten = lambda x: [y for l in x for y in flatten(l)] if type(x) is list else [x]
print(flatten(s))
LIST = [1, "a", ["b", 2, [3, "c", 4], "d", 5], 6, "e", [7, "f", 8], 9, "g", 0]
print(flatten(LIST))
# 将以下递归函数写到一个类中:
def printList(L, newList=list()):
# 展平多元列表
for x in L:
if isinstance(x, list):
printList(x,newList)
else:
newList.append((x))
return newList
print(printList(LIST))
# 写到类中:
class PrintList(object):
def printList(self, L, newList=list()):
self.L = L
for x in L:
if isinstance(x, list):
self.printList(x,newList)
else:
newList.append((x))
return newList
a = PrintList()
print(a.printList(LIST))
#----------------------------------------------
# 类似列表解析的例子:
# 例子:
S = set()
for i in [1,2,3,4]:
S.add(i)
print(S)
# 相当于:
S = {i for i in [1,2,3,4]}
print(S)
# ======================
# 例子:
D = dict()
for key, value in [('a',1), ('b',2)]:
D[key] = value
print(D)
# 相当于:
D = {key:value for key,value in [('a',1), ('b',2)]}
print(D)
#==============================================
print("\n\n"+"="*50)
print("使用map函数:")
#map(func,iterable1,iterable2,...)函数的使用。
#传入单参数函数:map(lambda x: x+1, num_list)
#传入多参数函数:map(lambda x,y: x+y, numlist1, numlist2)
# 注意:map()函数的第一个参数是函数
# 对应元素想加:
print(list(map(lambda x,y: x+y, [1,2,3], [4,5,6])))
#print(list(map(sum, list_in_list)))
print(list(map(abs,[-1,3,-5,4,2])))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用reduce函数:")
# reduce(func,sequence) # 仅Python2,Python3移在functools模块
#reduce(functhon,sequence[,initializer])函数的使用。
#注意:在Python3中,该函数被移至functools。
#用reduce计算出所有数字的和:
import functools
print(functools.reduce(lambda x,y:x+y, [1,2,3]))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用filter函数:")
# filter(func, iterable) # 过滤器。返回iterable中所有使func的返回值为真的元素。注意:若func是None,则返回值等价于True的元素。
#filter(func,sequence)过滤器函数。
# 返回iterable中所有使function返回值为真的元素组成的列表。
seasons = ["Spring||","|","Summer||","|","Fall||","Winter||"]
print(list(filter(lambda x:x!="|",seasons)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用zip函数:")
#zip()函数的使用。
#zip(list1,list2)
#用zip将先压缩,再还原。
L_zip = [(2,11),(4,13),(6,15),(8,17)]
print(list(zip(*L_zip)))
L_zip2 = [(2, 4, 6, 8), (11, 13, 15, 17)]
print(list(zip(*L_zip2)))
#将字典的键和值颠倒:一旦值有重复,将丢弃多余的。
D = {1:"a",2:"c",3:"c"}
print(dict(zip(D.values(),D.keys())))
#==============================================
print("\n\n"+"="*50)
# 这里两个结果并不一样,具体原因我暂时也不知道
def list_feature01():
def func1():
a = ['a', 'b', 'c']
m = a[1:2]
n = a[1:2]
print(m is n)
print(id(m) == id(n))
def func2():
a = ['a', 'b', 'c']
print(a[1:2] is a[1:2])
print(id(a[1:2]) == id(a[1:2]))
func1()
func2() | L = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
# 递归遍历。
def flattenList(L,new_list=[]):
# 展平多重列表 | random_line_split |
LIST.py | #coding=utf-8
# ========================================
# 注意0:列表、字符串和元组都属于序列,但仅列表是可变对象
# 注意1:列表是一种可变(mutable)对象
# 注意2:列表方法,将直接改变列表结构
# 注意3:对列表的引用被修改,将直接改变原列表
# 注意4:在函数中引用列表,也将改变原列表
# 注意5:如果要禁止修改原列表,可在函数中生成对列表的完全拷贝
# 注意6:列表的插入和删除除非尾部元素时会涉及列表中大量元素的移动,效率较低
# 警告0:列表的可变性可能造成意想不到的bug!
# ========================================
# 注意:列表的元素可以是包含列表在内的任何类型数据
# 举例:
# [1, 2, 3, 4, 5]
# ['Tom', 'Joy', 'Lily', 'Harry']
# [5, 3.2, 'hello', ('a', 2, 3), [5, 6.2, 'world']]
# ["a", 3, "c", 1, "b", 2, "d", 4, "e"]
# ========================================
# 列表的索引和切片操作
# 注意:索引操作返回非独立于列表的元素
# 注意:切片操作返回独立于列表的元素拷贝列表
# 列表的索引
def list_index():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 列表的每个元素都有一个序号,叫索引值,它标志元素的位置
# 列表有两套标记索引值的方法,一套正着数,一套倒着数
# 元素的索引值正着数(从左到右)依次为0, 1, 2, 3,...
# 元素的索引值倒着数(从右到左)依次为-1, -2, -3,...
# 获取列表的元素,要用索引操作
# 例如,要获取元素的第一个值,可以用正索引值:
print(L[0])
# 也可以用倒索引值:
print(L[-5])
# 而要获取元素的最后一个值,可以用正索引值:
print(L[4])
# 也可以用倒索引值:
print(L[-1])
# 注意:用不存在的索引值进行索引操作将引发异常IndexError
# print(L[7])
# 注意:索引操作,返回列表元素的非独立拷贝!与切片不同!
# 例如,以下结果返回True:
print(L[-1] is L[-1])
print(id(L[-1]) == id(L[-1]))
# 列表的切片
def list_slice():
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 获取列表的多个元素,可以使用列表的切片操作
# 切片操作有多种形式,如:
# 获取索引值在0~4(不含)区间的元素
print(L[0:4])
# 当起止切片的索引值在开头或结尾时,也可省略起止索引值
print(L[:4])
# 或者,用倒索引值也可以
print(L[-7:-3])
# 同样,使用倒索引也可以省略起止索引值
print(L[:-3])
# 甚至,还可以混合使用正索引值和倒索引值
print(L[0:-3])
# 或
print(L[-7:4])
# 注意:与索引操作不同,切片操作对不存在的索引值比较宽容
# 例如,获取索引值在0~4(不含)区间的元素,还可以用
print(L[-100:4])
# 因为会使写代码意图不明确,因此并不推荐如上用法
# --------------------
# 获取列表的完全拷贝,可以用
print(L[0:])
# 也可以利用切片操作对不存在的索引值宽容的特性写成
print(L[0:7])
# 或者,直接用省略起止索引值的办法(推荐)进行操作
print(L[:])
# --------------------
# 若需要获取列表中具有固定间隔的元素时,可以增加第三个数字
# 这种切片操作叫“步进式切片操作”
# 例如,每两个元素获取一个元素
print(L[0:7:2])
# 也可以用省略起止索引值的办法(推荐)进行操作
print(L[::2])
# --------------------
# 当第三个参数为负数时,将会进行逆序切片操作
# 例如,获取原列表的完整逆序拷贝
print(L[::-1])
# --------------------
# 这样,获取原列表的方法其实还有不传入第三个参数的办法
print(L[::])
# --------------------
# 注意:与索引操作不同,所有切片操作获得列表元素的浅拷贝
# 例如,以下结果返回False
print(L[0:1] is L[0:1])
print(id(L[0:1]) == id(L[0:1]))
if __name__ == '__main__':
list_index()
list_slice()
pass
#__import__("sys").exit()
# ========================================
# 列表方法
def list_method():
# 列表的方法,用于实现对列表元素的排序,以及增、删、改、查
# 列表的排序方法:sort(对混合列表无效)
# 注意:sort方法在原内存地址修改列表
# 注意:sort不能对['b', 1, 'a', 2]这样的混合列表排序
L1 = [1,3,2]
# 注意:修改操作在原内存地址进行,返回值为None
L1.sort()
print(L1)
L2 = ['c', 'a', 'b']
L2.sort()
print(L2)
# --------------------
# 列表的反转:reverse
# 注意:效果与切片操作list[::-1]相同,但是切片生成一个拷贝。
L = [1,3,2]
L.reverse()
print(L)
mixed_L = ["a", 3, "c", 1, "b", 2]
mixed_L.reverse()
#__import__("sys").exit()
# 插入list.insert():指定索引之前插入元素,其它元素后移。
# 注意:list[n] = "something"替换索引元素。
L = ["c","a","b"]
L.insert(2,"k")
print(L)
# 扩展list1.extend(list2)。改变原列表。
# 注意:合并操作list1 + list2生成一个拷贝。
L = [1,2]
L.extend(["a", "b"])
print(L)
# 追加list.append(item):在列表末尾增加元素。改变原列表。
L = [1,2]
L.append("abc")
print(L)
# 统计list.count(item):返回某个元素在列表中出现的频率。
# 注意:想一次统计多个元素,可参考模块collections。
L = ["a","b","c","c","d","c"]
print(L.count("c"))
# 索引list.index(item):返回列表中首次出现的索引。
#注意:对不存在的元素索引将引发ValueError。
L = ["a","b","c","c","d","c"]
print(L.index("c"))
# 列表值弹出list.pop(index):弹出指定索引元素并返回弹出元素。
# 注意:默认弹出最后一个元素。可用pop(3)弹出索引为3的元素。
# 注意:对于字典,pop(key)指弹出指定的键所对应的值。
L = ["a","b","d","c"]
print(L.pop(2))
print(L)
# 元素删除list.remove(item):移除某个值的首个匹配项。
L = ["a","b","c"]
L.remove("c")
print(L)
#同时遍历列表的索引和值:enumerate(list)
print(["%s:%s"%(index,item) for index,item in enumerate(["c", 'a', 'b'])])
# ========================================
print("\n\n"+"="*50)
print("列表解析:")
#列表解析:在Python2中返回列表,在Python3中返回生成器。
# 注意:在Python3中,可用list()收集元素到列表中。
#结构:
"""
[expression for target1 [if condition1]
for target2 [if condition2]
for target3 [if condition3]
...
for targetN [if conditionN]]
"""
# 注意:一个for语句后仅可使用一个if测试。
# ========================================
# 嵌套列表解析:
print([p+q for p in "defg" for q in "abc"])
# 生成具有某种特征的列表。
print([x % 2 == 0 for x in range(1, 11)])
# 使用None生成列表。
print([None]*10)
# 带有条件表达式的列表解析。
print([x for x in range(10) if x % 2 == 0])
# 获取指定值。
L = [("bob",35,"mgr"),("mel",40,"dev")]
print([name for (name,age,job) in L]) # 取出姓名。
# ----------------------------------------
# 用列表解析获取嵌套列表的值。
L = [ [1,3,5,7], [2,4,6,3], [3,1,2,8], [6,4,0,7] ]
print([row[2] for row in L]) # 获取第三列。
print([L[row][2] for row in [0,1,2,3]]) # 获取第三列。
print([row[2] for row in L if row[2]%2 == 0]) #第三列的偶数。
print([L[i][i] for i in [0,1,2,3]])# 获取对角元素值。
print([sum(row) for row in L]) # 求每行元素的和。
M = [[1,2,3],[4,5,6],[7,8,9]]
N = [[2,2,2],[3,3,3],[4,4,4]]
print([M[row][i]*N[row][i] for row in range(3) for i in range(3)])
print("M与N对应元素乘积:%s" % [[M[row][i]*N[row][i] for row in range(3)] for i in range(3)])
# ----------------------------------------
print( "\n\n" + "-"*40 )
print( "用列表解析除去文件中每行末尾的换行符:" )
#[line.rstrip() for line in open(filename).readlines()]
#[line.rstrip() for line in open(filename)]
#注意:后一种方法使用了文件迭代器。《Python学习手册4》
# ========================================
# ----------------------------------------
# 集合解析:{f(x) for x in S if P(x)} # 仅Python3可用。
# 类似生成器表达式set(f(x)for x in S if P(x))
print({row[0] for row in L}) # 集合解析。
# 字典解析:{key:val for (key,val) in zip(keys_list,vals_list)} # 仅Python3可用。
# 类似dict(zip(keys,vals))。
# 类似生成器表达式dict((x,f(x))for x in items)
print({i:ord(i) for i in "abcdef"}) # 字典解析。
# ========================================
print("\n\n"+"="*50)
print("列表的多重嵌套:" )
# 列表的多重嵌套。
print("展平(展开、合并)二重列表:")
# 展平(展开、合并)二重列表
# 仅含数字的列表
# 注意:这样的不行:[[1,2], [3], [4,5,6], [7,8,9,0], 11]
L = [[1,2], [3], [4,5,6], [7,8,9,0]]
print(sum(L, []))
import itertools # 用来展开一般嵌套列表
# 利用itertools库展平嵌套列表
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
import itertools
data = [[1, 2, 'b'], ['a', 5, 6]]
print(list(itertools.chain.from_iterable(data)))
# 再或者
from functools import reduce
from operator import add
data = [[1, 2, 'b'], ['a', 5, 6]]
print(reduce(add, data))
# 仅含字符串的列表
# 注意:这样的不行:[["a","b","c"],["d","e"],["f"],"g"]
L = [["a","b","c"], ["d","e"], ["f"]]
print(sum(L, []))
# 利用itertools
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
# 混合列表
L = [["a","b","c"], [1,2,"d","e"], [3,4]]
print(sum(L, []))
# 利用itertools
# 注意:itertools.chain()对下面的多重嵌套列表无效
print(list(itertools.chain(*L)))
print(list(itertools.chain.from_iterable(L)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("展开多重嵌套列表:")
# 展开多重列表
L = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
# 递归遍历。
def flattenList(L,new_list=[]):
# 展平多重列表
for i in L:
if not isinstance(i,list):
new_list.append(i)
else:
flattenList(i,new_list)
return new_list
print(flattenList(L))
# 方法三:用递归中的奇技淫巧。
func = lambda L: sum(map(func,L),[]) if isinstance(L,list) else [L]
new_str = func(L)
print(new_str)
s = [["a","b",["c",[1,[2],"d"],"e"]],[3,'f'],4,"g",5]
flat=lambda L: sum(map(flat,L),[]) if isinstance(L,list) else [L]
print(flat(s))
flatten = lambda x: [y for l in x for y in flatten(l)] if type(x) is list else [x]
print(flatten(s))
LIST = [1, "a", ["b", 2, [3, "c", 4], "d", 5], 6, "e", [7, "f", 8], 9, "g", 0]
print(flatten(LIST))
# 将以下递归函数写到一个类中:
def printList(L, newList=list()):
# 展平多元列表
for x in L:
if isinstance(x, list):
printList(x,newList)
else:
newList.append((x))
return newList
print(printList(LIST))
# 写到类中:
class PrintList(object):
def printList(self, L, newList=list()):
self.L = L
for x in L:
if isinstance(x, list):
self.printList(x,newList)
else:
newList.append((x))
return newList
a = PrintList()
print(a.printList(LIST))
#----------------------------------------------
# 类似列表解析的例子:
# 例子:
S = set()
for i in [1,2,3,4]:
S.add(i)
print(S)
# 相当于:
S = {i for i in [1,2,3,4]}
print(S)
# ======================
# 例子:
D = dict()
for key, value in [('a',1), ('b',2)]:
D[key] = value
print(D)
# 相当于:
D = {key:value for key,value in [('a',1), ('b',2)]}
print(D)
#==============================================
print("\n\n"+"="*50)
print("使用map函数:")
#map(func,iterable1,iterable2,...)函数的使用。
#传入单参数函数:map(lambda x: x+1, num_list)
#传入多参数函数:map(lambda x,y: x+y, numlist1, numlist2)
# 注意:map()函数的第一个参数是函数
# 对应元素想加:
print(list(map(lambda x,y: x+y, [1,2,3], [4,5,6])))
#print(list(map(sum, list_in_list)))
print(list(map(abs,[-1,3,-5,4,2])))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用reduce函数:")
# reduce(func,sequence) # 仅Python2,Python3移在functools模块
#reduce(functhon,sequence[,initializer])函数的使用。
#注意:在Python3中,该函数被移至functools。
#用reduce计算出所有数字的和:
import functools
print(functools.reduce(lambda x,y:x+y, [1,2,3]))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用filter函数:")
# filter(func, iterable) # 过滤器。返回iterable中所有使func的返回值为真的元素。注意:若func是None,则返回值等价于True的元素。
#filter(func,sequence)过滤器函数。
# 返回iterable中所有使function返回值为真的元素组成的列表。
seasons = ["Spring||","|","Summer||","|","Fall||","Winter||"]
print(list(filter(lambda x:x!="|",seasons)))
#----------------------------------------------
print("\n\n"+"-"*50)
print("使用zip函数:")
#zip()函数的使用。
#zip(list1,list2)
#用zip将先压缩,再还原。
L_zip = [(2,11),(4,13),(6,15),(8,17)]
print(list(zip(*L_zip)))
L_zip2 = [(2, 4, 6, 8), (11, 13, 15, 17)]
print(list(zip(*L_zip2)))
#将字典的键和值颠倒:一旦值有重复,将丢弃多余的。
D = {1:"a",2:"c",3:"c"}
print(dict(zip(D.values(),D.keys())))
#==============================================
print("\n\n"+"="*50)
# 这里两个结果并不一样,具体原因我暂时也不知道
def list_feature01():
def func1():
a = ['a', 'b', 'c']
m = a[1:2]
n = a[1:2]
print(m is n)
print(id(m) == id(n))
def func2():
a = ['a', 'b', 'c']
print(a[1:2] is a[1:2])
print(id(a[1:2]) == id(a[1:2]))
func1()
func2()
| conditional_block | ||
part2.py | ################################################################################
#Michael Guerzhoy, 2016
#AlexNet implementation in TensorFlow, with weights
#Details:
#http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/
#
#With code from https://github.com/ethereon/caffe-tensorflow
#Model from https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet
#Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow
#
#
################################################################################
from numpy import *
import os
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import time
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
from scipy.ndimage import filters
from scipy.io import loadmat, savemat
import urllib
from numpy import random
import tensorflow as tf
import cPickle
from actor_classes import class_names
# Initialize the actor names and their lastnames for later use
act = ['Gerard Butler', 'Daniel Radcliffe', 'Michael Vartan', 'Lorraine Bracco', 'Peri Gilpin', 'Angie Harmon']
lastname = ['butler', 'radcliffe', 'vartan', 'bracco', 'gilpin', 'harmon']
# Modify the dimension to vectorize the data
train_x = zeros((70, 227,227,3)).astype(float32)
train_y = zeros((1, 6))
xdim = train_x.shape[1:]
ydim = train_y.shape[1]
# Set up the directory locations
train_dir = 'training set/'
valid_dir = 'validation set/'
test_dir = 'test set/'
part2_dir = '/home/student/tf_alexnet/part2/'
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
'''From https://github.com/ethereon/caffe-tensorflow
'''
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
if group==1:
conv = convolve(input, kernel)
else:
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
conv = tf.concat(3, output_groups)
return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
################################################################################
#Read Image
def create_new_input(dataset, datasize, n):
'''This function takes in all images in a given data set with given size and n is used to indicate the index of actor from the act list.
'''
actor_dir = act[n]+"/"
x_dummy = (random.random((datasize,)+ xdim)/255.).astype(float32)
i = x_dummy.copy()
if datasize == 70:
starting_index = 0
elif datasize == 20:
starting_index = 70
else:
starting_index = 90
for j in range(datasize): # This loop vectorizes the data
i[j,:,:,:] = (imread(dataset+actor_dir+lastname[n]+str(starting_index+j)+".jpg")[:,:,:3]).astype(float32)
i = i-mean(i)
net_data = load("../bvlc_alexnet.npy").item()
x = tf.Variable(i)
#conv1
#conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4
conv1W = tf.Variable(net_data["conv1"][0])
conv1b = tf.Variable(net_data["conv1"][1])
conv1_in = conv(x, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=1)
conv1 = tf.nn.relu(conv1_in)
#lrn1
#lrn(2, 2e-05, 0.75, name='norm1')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn1 = tf.nn.local_response_normalization(conv1,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool1
#max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv2
#conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2
conv2W = tf.Variable(net_data["conv2"][0])
conv2b = tf.Variable(net_data["conv2"][1])
conv2_in = conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv2 = tf.nn.relu(conv2_in)
#lrn2
#lrn(2, 2e-05, 0.75, name='norm2')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn2 = tf.nn.local_response_normalization(conv2,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool2
#max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv3
#conv(3, 3, 384, 1, 1, name='conv3')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1
conv3W = tf.Variable(net_data["conv3"][0])
conv3b = tf.Variable(net_data["conv3"][1])
conv3_in = conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv3 = tf.nn.relu(conv3_in)
#conv4
#conv(3, 3, 384, 1, 1, group=2, name='conv4')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2
conv4W = tf.Variable(net_data["conv4"][0])
conv4b = tf.Variable(net_data["conv4"][1])
conv4_in = conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv4 = tf.nn.relu(conv4_in)
conv4flatten = tf.reshape(conv4, [datasize, int(prod(conv4.get_shape()[1:]))])
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# The vectorized data needs to be flattened and stored in a numpy array
new_input = conv4flatten.eval(session=sess)
return new_input
def create_M(train_dir, valid_dir, test_dir):
''' This function creates a .mat file which stores all faces
'''
mdict = {}
i = 0
for actor in act:
counter = 0
for filename in os.listdir(test_dir+actor+"/"):
|
train_matrix = create_new_input(train_dir, 70, i)
valid_matrix = create_new_input(valid_dir, 20, i)
test_matrix = create_new_input(test_dir, counter, i)
mdict["train"+str(i)] = train_matrix
mdict["valid"+str(i)] = valid_matrix
mdict["test"+str(i)] = test_matrix
savemat('newfaces.mat', mdict)
i += 1
def create_M_for_actor(actor, test_dir):
''' This function creates a .mat file which stores all faces
'''
mdict = {}
counter = 0
for i in range(6):
if act[i] == actor:
break
for filename in os.listdir(test_dir+actor+"/"):
counter += 1
test_matrix = create_new_input(test_dir, counter, i)
mdict["test"+str(i)] = test_matrix
savemat(actor+'.mat', mdict)
# Uncomment this line to create the .mat file, this is the most important part.
#create_M(train_dir, valid_dir, test_dir)
M = loadmat("/home/student/tf_alexnet/part2/newfaces.mat")
# The rest part of the code is the same as part 1 except the dimensions of the data
def get_train_batch(M, N):
n = N/10
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
train_size = len(M[train_k[0]])
#train_size = 5000
for k in range(6):
train_size = len(M[train_k[k]])
idx = array(random.permutation(train_size)[:n])
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[idx])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (n, 1)) ))
return batch_xs, batch_y_s
def get_test(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
test_k = ["test"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[test_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_test_for_actor(M, actor):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
for i in range(6):
if act[i] == actor:
break
test_k = "test"+str(i)
batch_xs = vstack((batch_xs, ((array(M[test_k])[:])/255.) ))
one_hot = zeros(6)
one_hot[i] = 1
#batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k]), 1)) ))
batch_y_s = vstack((batch_y_s, one_hot))
return batch_xs, batch_y_s
def get_valid(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
valid_k = ["valid"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[valid_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[valid_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_train(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[train_k[k]]), 1)) ))
return batch_xs, batch_y_s
x = tf.placeholder(tf.float32, [None, 64896])
nhid = 300
W0 = tf.Variable(tf.random_normal([64896, nhid], stddev=0.01))
b0 = tf.Variable(tf.random_normal([nhid], stddev=0.01))
W1 = tf.Variable(tf.random_normal([nhid, 6], stddev=0.01))
b1 = tf.Variable(tf.random_normal([6], stddev=0.01))
layer1 = tf.nn.tanh(tf.matmul(x, W0)+b0)
layer2 = tf.matmul(layer1, W1)+b1
y = tf.nn.softmax(layer2)
y_ = tf.placeholder(tf.float32, [None, 6])
lam = 0.00000
decay_penalty =lam*tf.reduce_sum(tf.square(W0))+lam*tf.reduce_sum(tf.square(W1))
NLL = -tf.reduce_sum(y_*tf.log(y)+decay_penalty)
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(NLL)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_x, test_y = get_test(M)
valid_x, valid_y = get_valid(M)
# Initialize the data for plotting
trainCR = array([])
validCR = array([])
testCR = array([])
h = array([])
# Start the training process
for i in range(100):
batch_xs, batch_ys = get_train_batch(M, 50)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
if i % 1 == 0:
valid_accuracy = sess.run(accuracy, feed_dict={x: valid_x, y_: valid_y})
test_accuracy = sess.run(accuracy, feed_dict={x: test_x, y_: test_y})
train_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
batch_xs, batch_ys = get_train(M)
# Save the trained weights and biases for part 4
if i == 99:
snapshot = {}
snapshot["W0"] = sess.run(W0)
snapshot["W1"] = sess.run(W1)
snapshot["b0"] = sess.run(b0)
snapshot["b1"] = sess.run(b1)
cPickle.dump(snapshot, open("new_snapshot"+str(i)+".pkl", "w"))
# Save parameters for plotting
trainCR = np.append(trainCR, train_accuracy)
validCR = np.append(validCR, valid_accuracy)
testCR = np.append(testCR, test_accuracy)
h = np.append(h, i)
# print "The final performance classification on the test set is: ", test_accuracy
# plt.plot(h, trainCR, 'r', label = "training set")
# plt.plot(h, validCR, 'g', label = "validation set")
# plt.plot(h, testCR, 'b', label = "test set")
# plt.title('Correct classification rate vs Iterations')
# plt.xlabel('Number of Iterations')
# plt.ylabel('Correct classification rate')
# plt.legend(loc='lower right')
# plt.show()
################################################################################
# Call create M for actor, change actor's name to see results for actor
actor = 'Gerard Butler'
create_M_for_actor(actor, test_dir)
M_actor = loadmat(actor+'.mat')
for i in range(6):
if act[i] == actor:
break
# Call test x function to get test_actorx, test_actory
test_actorx, test_actory = get_test_for_actor(M_actor, actor)
#Output:
# Feed the session with test actor x and its targets
output = sess.run(y, feed_dict={x: test_actorx, y_: test_actory})
inds = argsort(output)[0,:]
for i in range(6):
print class_names[inds[-1-i]], output[0, inds[-1-i]]
| counter += 1 | conditional_block |
part2.py | ################################################################################
#Michael Guerzhoy, 2016
#AlexNet implementation in TensorFlow, with weights
#Details:
#http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/
#
#With code from https://github.com/ethereon/caffe-tensorflow
#Model from https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet
#Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow
#
#
################################################################################
from numpy import *
import os
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import time
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
from scipy.ndimage import filters
from scipy.io import loadmat, savemat
import urllib
from numpy import random
import tensorflow as tf
import cPickle
from actor_classes import class_names
# Initialize the actor names and their lastnames for later use
act = ['Gerard Butler', 'Daniel Radcliffe', 'Michael Vartan', 'Lorraine Bracco', 'Peri Gilpin', 'Angie Harmon']
lastname = ['butler', 'radcliffe', 'vartan', 'bracco', 'gilpin', 'harmon']
# Modify the dimension to vectorize the data
train_x = zeros((70, 227,227,3)).astype(float32)
train_y = zeros((1, 6))
xdim = train_x.shape[1:]
ydim = train_y.shape[1]
# Set up the directory locations
train_dir = 'training set/'
valid_dir = 'validation set/'
test_dir = 'test set/'
part2_dir = '/home/student/tf_alexnet/part2/'
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
'''From https://github.com/ethereon/caffe-tensorflow
'''
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
if group==1:
conv = convolve(input, kernel)
else:
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
conv = tf.concat(3, output_groups)
return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
################################################################################
#Read Image
def create_new_input(dataset, datasize, n):
'''This function takes in all images in a given data set with given size and n is used to indicate the index of actor from the act list.
'''
actor_dir = act[n]+"/"
x_dummy = (random.random((datasize,)+ xdim)/255.).astype(float32)
i = x_dummy.copy()
if datasize == 70:
starting_index = 0
elif datasize == 20:
starting_index = 70
else:
starting_index = 90
for j in range(datasize): # This loop vectorizes the data
i[j,:,:,:] = (imread(dataset+actor_dir+lastname[n]+str(starting_index+j)+".jpg")[:,:,:3]).astype(float32)
i = i-mean(i)
net_data = load("../bvlc_alexnet.npy").item()
x = tf.Variable(i)
#conv1
#conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4
conv1W = tf.Variable(net_data["conv1"][0])
conv1b = tf.Variable(net_data["conv1"][1])
conv1_in = conv(x, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=1)
conv1 = tf.nn.relu(conv1_in)
#lrn1
#lrn(2, 2e-05, 0.75, name='norm1')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn1 = tf.nn.local_response_normalization(conv1,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool1
#max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv2
#conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2
conv2W = tf.Variable(net_data["conv2"][0])
conv2b = tf.Variable(net_data["conv2"][1])
conv2_in = conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv2 = tf.nn.relu(conv2_in)
#lrn2
#lrn(2, 2e-05, 0.75, name='norm2')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn2 = tf.nn.local_response_normalization(conv2,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool2
#max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv3 | conv3b = tf.Variable(net_data["conv3"][1])
conv3_in = conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv3 = tf.nn.relu(conv3_in)
#conv4
#conv(3, 3, 384, 1, 1, group=2, name='conv4')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2
conv4W = tf.Variable(net_data["conv4"][0])
conv4b = tf.Variable(net_data["conv4"][1])
conv4_in = conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv4 = tf.nn.relu(conv4_in)
conv4flatten = tf.reshape(conv4, [datasize, int(prod(conv4.get_shape()[1:]))])
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# The vectorized data needs to be flattened and stored in a numpy array
new_input = conv4flatten.eval(session=sess)
return new_input
def create_M(train_dir, valid_dir, test_dir):
''' This function creates a .mat file which stores all faces
'''
mdict = {}
i = 0
for actor in act:
counter = 0
for filename in os.listdir(test_dir+actor+"/"):
counter += 1
train_matrix = create_new_input(train_dir, 70, i)
valid_matrix = create_new_input(valid_dir, 20, i)
test_matrix = create_new_input(test_dir, counter, i)
mdict["train"+str(i)] = train_matrix
mdict["valid"+str(i)] = valid_matrix
mdict["test"+str(i)] = test_matrix
savemat('newfaces.mat', mdict)
i += 1
def create_M_for_actor(actor, test_dir):
''' This function creates a .mat file which stores all faces
'''
mdict = {}
counter = 0
for i in range(6):
if act[i] == actor:
break
for filename in os.listdir(test_dir+actor+"/"):
counter += 1
test_matrix = create_new_input(test_dir, counter, i)
mdict["test"+str(i)] = test_matrix
savemat(actor+'.mat', mdict)
# Uncomment this line to create the .mat file, this is the most important part.
#create_M(train_dir, valid_dir, test_dir)
M = loadmat("/home/student/tf_alexnet/part2/newfaces.mat")
# The rest part of the code is the same as part 1 except the dimensions of the data
def get_train_batch(M, N):
n = N/10
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
train_size = len(M[train_k[0]])
#train_size = 5000
for k in range(6):
train_size = len(M[train_k[k]])
idx = array(random.permutation(train_size)[:n])
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[idx])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (n, 1)) ))
return batch_xs, batch_y_s
def get_test(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
test_k = ["test"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[test_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_test_for_actor(M, actor):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
for i in range(6):
if act[i] == actor:
break
test_k = "test"+str(i)
batch_xs = vstack((batch_xs, ((array(M[test_k])[:])/255.) ))
one_hot = zeros(6)
one_hot[i] = 1
#batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k]), 1)) ))
batch_y_s = vstack((batch_y_s, one_hot))
return batch_xs, batch_y_s
def get_valid(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
valid_k = ["valid"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[valid_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[valid_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_train(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[train_k[k]]), 1)) ))
return batch_xs, batch_y_s
x = tf.placeholder(tf.float32, [None, 64896])
nhid = 300
W0 = tf.Variable(tf.random_normal([64896, nhid], stddev=0.01))
b0 = tf.Variable(tf.random_normal([nhid], stddev=0.01))
W1 = tf.Variable(tf.random_normal([nhid, 6], stddev=0.01))
b1 = tf.Variable(tf.random_normal([6], stddev=0.01))
layer1 = tf.nn.tanh(tf.matmul(x, W0)+b0)
layer2 = tf.matmul(layer1, W1)+b1
y = tf.nn.softmax(layer2)
y_ = tf.placeholder(tf.float32, [None, 6])
lam = 0.00000
decay_penalty =lam*tf.reduce_sum(tf.square(W0))+lam*tf.reduce_sum(tf.square(W1))
NLL = -tf.reduce_sum(y_*tf.log(y)+decay_penalty)
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(NLL)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_x, test_y = get_test(M)
valid_x, valid_y = get_valid(M)
# Initialize the data for plotting
trainCR = array([])
validCR = array([])
testCR = array([])
h = array([])
# Start the training process
for i in range(100):
batch_xs, batch_ys = get_train_batch(M, 50)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
if i % 1 == 0:
valid_accuracy = sess.run(accuracy, feed_dict={x: valid_x, y_: valid_y})
test_accuracy = sess.run(accuracy, feed_dict={x: test_x, y_: test_y})
train_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
batch_xs, batch_ys = get_train(M)
# Save the trained weights and biases for part 4
if i == 99:
snapshot = {}
snapshot["W0"] = sess.run(W0)
snapshot["W1"] = sess.run(W1)
snapshot["b0"] = sess.run(b0)
snapshot["b1"] = sess.run(b1)
cPickle.dump(snapshot, open("new_snapshot"+str(i)+".pkl", "w"))
# Save parameters for plotting
trainCR = np.append(trainCR, train_accuracy)
validCR = np.append(validCR, valid_accuracy)
testCR = np.append(testCR, test_accuracy)
h = np.append(h, i)
# print "The final performance classification on the test set is: ", test_accuracy
# plt.plot(h, trainCR, 'r', label = "training set")
# plt.plot(h, validCR, 'g', label = "validation set")
# plt.plot(h, testCR, 'b', label = "test set")
# plt.title('Correct classification rate vs Iterations')
# plt.xlabel('Number of Iterations')
# plt.ylabel('Correct classification rate')
# plt.legend(loc='lower right')
# plt.show()
################################################################################
# Call create M for actor, change actor's name to see results for actor
actor = 'Gerard Butler'
create_M_for_actor(actor, test_dir)
M_actor = loadmat(actor+'.mat')
for i in range(6):
if act[i] == actor:
break
# Call test x function to get test_actorx, test_actory
test_actorx, test_actory = get_test_for_actor(M_actor, actor)
#Output:
# Feed the session with test actor x and its targets
output = sess.run(y, feed_dict={x: test_actorx, y_: test_actory})
inds = argsort(output)[0,:]
for i in range(6):
print class_names[inds[-1-i]], output[0, inds[-1-i]] | #conv(3, 3, 384, 1, 1, name='conv3')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1
conv3W = tf.Variable(net_data["conv3"][0]) | random_line_split |
part2.py | ################################################################################
#Michael Guerzhoy, 2016
#AlexNet implementation in TensorFlow, with weights
#Details:
#http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/
#
#With code from https://github.com/ethereon/caffe-tensorflow
#Model from https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet
#Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow
#
#
################################################################################
from numpy import *
import os
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import time
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
from scipy.ndimage import filters
from scipy.io import loadmat, savemat
import urllib
from numpy import random
import tensorflow as tf
import cPickle
from actor_classes import class_names
# Initialize the actor names and their lastnames for later use
act = ['Gerard Butler', 'Daniel Radcliffe', 'Michael Vartan', 'Lorraine Bracco', 'Peri Gilpin', 'Angie Harmon']
lastname = ['butler', 'radcliffe', 'vartan', 'bracco', 'gilpin', 'harmon']
# Modify the dimension to vectorize the data
train_x = zeros((70, 227,227,3)).astype(float32)
train_y = zeros((1, 6))
xdim = train_x.shape[1:]
ydim = train_y.shape[1]
# Set up the directory locations
train_dir = 'training set/'
valid_dir = 'validation set/'
test_dir = 'test set/'
part2_dir = '/home/student/tf_alexnet/part2/'
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
'''From https://github.com/ethereon/caffe-tensorflow
'''
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
if group==1:
conv = convolve(input, kernel)
else:
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
conv = tf.concat(3, output_groups)
return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
################################################################################
#Read Image
def create_new_input(dataset, datasize, n):
'''This function takes in all images in a given data set with given size and n is used to indicate the index of actor from the act list.
'''
actor_dir = act[n]+"/"
x_dummy = (random.random((datasize,)+ xdim)/255.).astype(float32)
i = x_dummy.copy()
if datasize == 70:
starting_index = 0
elif datasize == 20:
starting_index = 70
else:
starting_index = 90
for j in range(datasize): # This loop vectorizes the data
i[j,:,:,:] = (imread(dataset+actor_dir+lastname[n]+str(starting_index+j)+".jpg")[:,:,:3]).astype(float32)
i = i-mean(i)
net_data = load("../bvlc_alexnet.npy").item()
x = tf.Variable(i)
#conv1
#conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4
conv1W = tf.Variable(net_data["conv1"][0])
conv1b = tf.Variable(net_data["conv1"][1])
conv1_in = conv(x, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=1)
conv1 = tf.nn.relu(conv1_in)
#lrn1
#lrn(2, 2e-05, 0.75, name='norm1')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn1 = tf.nn.local_response_normalization(conv1,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool1
#max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv2
#conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2
conv2W = tf.Variable(net_data["conv2"][0])
conv2b = tf.Variable(net_data["conv2"][1])
conv2_in = conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv2 = tf.nn.relu(conv2_in)
#lrn2
#lrn(2, 2e-05, 0.75, name='norm2')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn2 = tf.nn.local_response_normalization(conv2,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool2
#max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv3
#conv(3, 3, 384, 1, 1, name='conv3')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1
conv3W = tf.Variable(net_data["conv3"][0])
conv3b = tf.Variable(net_data["conv3"][1])
conv3_in = conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv3 = tf.nn.relu(conv3_in)
#conv4
#conv(3, 3, 384, 1, 1, group=2, name='conv4')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2
conv4W = tf.Variable(net_data["conv4"][0])
conv4b = tf.Variable(net_data["conv4"][1])
conv4_in = conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv4 = tf.nn.relu(conv4_in)
conv4flatten = tf.reshape(conv4, [datasize, int(prod(conv4.get_shape()[1:]))])
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# The vectorized data needs to be flattened and stored in a numpy array
new_input = conv4flatten.eval(session=sess)
return new_input
def create_M(train_dir, valid_dir, test_dir):
|
def create_M_for_actor(actor, test_dir):
''' This function creates a .mat file which stores all faces
'''
mdict = {}
counter = 0
for i in range(6):
if act[i] == actor:
break
for filename in os.listdir(test_dir+actor+"/"):
counter += 1
test_matrix = create_new_input(test_dir, counter, i)
mdict["test"+str(i)] = test_matrix
savemat(actor+'.mat', mdict)
# Uncomment this line to create the .mat file, this is the most important part.
#create_M(train_dir, valid_dir, test_dir)
M = loadmat("/home/student/tf_alexnet/part2/newfaces.mat")
# The rest part of the code is the same as part 1 except the dimensions of the data
def get_train_batch(M, N):
n = N/10
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
train_size = len(M[train_k[0]])
#train_size = 5000
for k in range(6):
train_size = len(M[train_k[k]])
idx = array(random.permutation(train_size)[:n])
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[idx])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (n, 1)) ))
return batch_xs, batch_y_s
def get_test(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
test_k = ["test"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[test_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_test_for_actor(M, actor):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
for i in range(6):
if act[i] == actor:
break
test_k = "test"+str(i)
batch_xs = vstack((batch_xs, ((array(M[test_k])[:])/255.) ))
one_hot = zeros(6)
one_hot[i] = 1
#batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k]), 1)) ))
batch_y_s = vstack((batch_y_s, one_hot))
return batch_xs, batch_y_s
def get_valid(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
valid_k = ["valid"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[valid_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[valid_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_train(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[train_k[k]]), 1)) ))
return batch_xs, batch_y_s
x = tf.placeholder(tf.float32, [None, 64896])
nhid = 300
W0 = tf.Variable(tf.random_normal([64896, nhid], stddev=0.01))
b0 = tf.Variable(tf.random_normal([nhid], stddev=0.01))
W1 = tf.Variable(tf.random_normal([nhid, 6], stddev=0.01))
b1 = tf.Variable(tf.random_normal([6], stddev=0.01))
layer1 = tf.nn.tanh(tf.matmul(x, W0)+b0)
layer2 = tf.matmul(layer1, W1)+b1
y = tf.nn.softmax(layer2)
y_ = tf.placeholder(tf.float32, [None, 6])
lam = 0.00000
decay_penalty =lam*tf.reduce_sum(tf.square(W0))+lam*tf.reduce_sum(tf.square(W1))
NLL = -tf.reduce_sum(y_*tf.log(y)+decay_penalty)
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(NLL)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_x, test_y = get_test(M)
valid_x, valid_y = get_valid(M)
# Initialize the data for plotting
trainCR = array([])
validCR = array([])
testCR = array([])
h = array([])
# Start the training process
for i in range(100):
batch_xs, batch_ys = get_train_batch(M, 50)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
if i % 1 == 0:
valid_accuracy = sess.run(accuracy, feed_dict={x: valid_x, y_: valid_y})
test_accuracy = sess.run(accuracy, feed_dict={x: test_x, y_: test_y})
train_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
batch_xs, batch_ys = get_train(M)
# Save the trained weights and biases for part 4
if i == 99:
snapshot = {}
snapshot["W0"] = sess.run(W0)
snapshot["W1"] = sess.run(W1)
snapshot["b0"] = sess.run(b0)
snapshot["b1"] = sess.run(b1)
cPickle.dump(snapshot, open("new_snapshot"+str(i)+".pkl", "w"))
# Save parameters for plotting
trainCR = np.append(trainCR, train_accuracy)
validCR = np.append(validCR, valid_accuracy)
testCR = np.append(testCR, test_accuracy)
h = np.append(h, i)
# print "The final performance classification on the test set is: ", test_accuracy
# plt.plot(h, trainCR, 'r', label = "training set")
# plt.plot(h, validCR, 'g', label = "validation set")
# plt.plot(h, testCR, 'b', label = "test set")
# plt.title('Correct classification rate vs Iterations')
# plt.xlabel('Number of Iterations')
# plt.ylabel('Correct classification rate')
# plt.legend(loc='lower right')
# plt.show()
################################################################################
# Call create M for actor, change actor's name to see results for actor
actor = 'Gerard Butler'
create_M_for_actor(actor, test_dir)
M_actor = loadmat(actor+'.mat')
for i in range(6):
if act[i] == actor:
break
# Call test x function to get test_actorx, test_actory
test_actorx, test_actory = get_test_for_actor(M_actor, actor)
#Output:
# Feed the session with test actor x and its targets
output = sess.run(y, feed_dict={x: test_actorx, y_: test_actory})
inds = argsort(output)[0,:]
for i in range(6):
print class_names[inds[-1-i]], output[0, inds[-1-i]]
| ''' This function creates a .mat file which stores all faces
'''
mdict = {}
i = 0
for actor in act:
counter = 0
for filename in os.listdir(test_dir+actor+"/"):
counter += 1
train_matrix = create_new_input(train_dir, 70, i)
valid_matrix = create_new_input(valid_dir, 20, i)
test_matrix = create_new_input(test_dir, counter, i)
mdict["train"+str(i)] = train_matrix
mdict["valid"+str(i)] = valid_matrix
mdict["test"+str(i)] = test_matrix
savemat('newfaces.mat', mdict)
i += 1 | identifier_body |
part2.py | ################################################################################
#Michael Guerzhoy, 2016
#AlexNet implementation in TensorFlow, with weights
#Details:
#http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/
#
#With code from https://github.com/ethereon/caffe-tensorflow
#Model from https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet
#Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow
#
#
################################################################################
from numpy import *
import os
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import time
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
from scipy.ndimage import filters
from scipy.io import loadmat, savemat
import urllib
from numpy import random
import tensorflow as tf
import cPickle
from actor_classes import class_names
# Initialize the actor names and their lastnames for later use
act = ['Gerard Butler', 'Daniel Radcliffe', 'Michael Vartan', 'Lorraine Bracco', 'Peri Gilpin', 'Angie Harmon']
lastname = ['butler', 'radcliffe', 'vartan', 'bracco', 'gilpin', 'harmon']
# Modify the dimension to vectorize the data
train_x = zeros((70, 227,227,3)).astype(float32)
train_y = zeros((1, 6))
xdim = train_x.shape[1:]
ydim = train_y.shape[1]
# Set up the directory locations
train_dir = 'training set/'
valid_dir = 'validation set/'
test_dir = 'test set/'
part2_dir = '/home/student/tf_alexnet/part2/'
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
'''From https://github.com/ethereon/caffe-tensorflow
'''
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
if group==1:
conv = convolve(input, kernel)
else:
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
conv = tf.concat(3, output_groups)
return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
################################################################################
#Read Image
def | (dataset, datasize, n):
'''This function takes in all images in a given data set with given size and n is used to indicate the index of actor from the act list.
'''
actor_dir = act[n]+"/"
x_dummy = (random.random((datasize,)+ xdim)/255.).astype(float32)
i = x_dummy.copy()
if datasize == 70:
starting_index = 0
elif datasize == 20:
starting_index = 70
else:
starting_index = 90
for j in range(datasize): # This loop vectorizes the data
i[j,:,:,:] = (imread(dataset+actor_dir+lastname[n]+str(starting_index+j)+".jpg")[:,:,:3]).astype(float32)
i = i-mean(i)
net_data = load("../bvlc_alexnet.npy").item()
x = tf.Variable(i)
#conv1
#conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4
conv1W = tf.Variable(net_data["conv1"][0])
conv1b = tf.Variable(net_data["conv1"][1])
conv1_in = conv(x, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=1)
conv1 = tf.nn.relu(conv1_in)
#lrn1
#lrn(2, 2e-05, 0.75, name='norm1')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn1 = tf.nn.local_response_normalization(conv1,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool1
#max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv2
#conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2
conv2W = tf.Variable(net_data["conv2"][0])
conv2b = tf.Variable(net_data["conv2"][1])
conv2_in = conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv2 = tf.nn.relu(conv2_in)
#lrn2
#lrn(2, 2e-05, 0.75, name='norm2')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn2 = tf.nn.local_response_normalization(conv2,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
#maxpool2
#max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
#conv3
#conv(3, 3, 384, 1, 1, name='conv3')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1
conv3W = tf.Variable(net_data["conv3"][0])
conv3b = tf.Variable(net_data["conv3"][1])
conv3_in = conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv3 = tf.nn.relu(conv3_in)
#conv4
#conv(3, 3, 384, 1, 1, group=2, name='conv4')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2
conv4W = tf.Variable(net_data["conv4"][0])
conv4b = tf.Variable(net_data["conv4"][1])
conv4_in = conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv4 = tf.nn.relu(conv4_in)
conv4flatten = tf.reshape(conv4, [datasize, int(prod(conv4.get_shape()[1:]))])
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# The vectorized data needs to be flattened and stored in a numpy array
new_input = conv4flatten.eval(session=sess)
return new_input
def create_M(train_dir, valid_dir, test_dir):
''' This function creates a .mat file which stores all faces
'''
mdict = {}
i = 0
for actor in act:
counter = 0
for filename in os.listdir(test_dir+actor+"/"):
counter += 1
train_matrix = create_new_input(train_dir, 70, i)
valid_matrix = create_new_input(valid_dir, 20, i)
test_matrix = create_new_input(test_dir, counter, i)
mdict["train"+str(i)] = train_matrix
mdict["valid"+str(i)] = valid_matrix
mdict["test"+str(i)] = test_matrix
savemat('newfaces.mat', mdict)
i += 1
def create_M_for_actor(actor, test_dir):
''' This function creates a .mat file which stores all faces
'''
mdict = {}
counter = 0
for i in range(6):
if act[i] == actor:
break
for filename in os.listdir(test_dir+actor+"/"):
counter += 1
test_matrix = create_new_input(test_dir, counter, i)
mdict["test"+str(i)] = test_matrix
savemat(actor+'.mat', mdict)
# Uncomment this line to create the .mat file, this is the most important part.
#create_M(train_dir, valid_dir, test_dir)
M = loadmat("/home/student/tf_alexnet/part2/newfaces.mat")
# The rest part of the code is the same as part 1 except the dimensions of the data
def get_train_batch(M, N):
n = N/10
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
train_size = len(M[train_k[0]])
#train_size = 5000
for k in range(6):
train_size = len(M[train_k[k]])
idx = array(random.permutation(train_size)[:n])
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[idx])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (n, 1)) ))
return batch_xs, batch_y_s
def get_test(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
test_k = ["test"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[test_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_test_for_actor(M, actor):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
for i in range(6):
if act[i] == actor:
break
test_k = "test"+str(i)
batch_xs = vstack((batch_xs, ((array(M[test_k])[:])/255.) ))
one_hot = zeros(6)
one_hot[i] = 1
#batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[test_k]), 1)) ))
batch_y_s = vstack((batch_y_s, one_hot))
return batch_xs, batch_y_s
def get_valid(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
valid_k = ["valid"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[valid_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[valid_k[k]]), 1)) ))
return batch_xs, batch_y_s
def get_train(M):
batch_xs = zeros((0, 64896))
batch_y_s = zeros( (0, 6))
train_k = ["train"+str(i) for i in range(6)]
for k in range(6):
batch_xs = vstack((batch_xs, ((array(M[train_k[k]])[:])/255.) ))
one_hot = zeros(6)
one_hot[k] = 1
batch_y_s = vstack((batch_y_s, tile(one_hot, (len(M[train_k[k]]), 1)) ))
return batch_xs, batch_y_s
x = tf.placeholder(tf.float32, [None, 64896])
nhid = 300
W0 = tf.Variable(tf.random_normal([64896, nhid], stddev=0.01))
b0 = tf.Variable(tf.random_normal([nhid], stddev=0.01))
W1 = tf.Variable(tf.random_normal([nhid, 6], stddev=0.01))
b1 = tf.Variable(tf.random_normal([6], stddev=0.01))
layer1 = tf.nn.tanh(tf.matmul(x, W0)+b0)
layer2 = tf.matmul(layer1, W1)+b1
y = tf.nn.softmax(layer2)
y_ = tf.placeholder(tf.float32, [None, 6])
lam = 0.00000
decay_penalty =lam*tf.reduce_sum(tf.square(W0))+lam*tf.reduce_sum(tf.square(W1))
NLL = -tf.reduce_sum(y_*tf.log(y)+decay_penalty)
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(NLL)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
test_x, test_y = get_test(M)
valid_x, valid_y = get_valid(M)
# Initialize the data for plotting
trainCR = array([])
validCR = array([])
testCR = array([])
h = array([])
# Start the training process
for i in range(100):
batch_xs, batch_ys = get_train_batch(M, 50)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
if i % 1 == 0:
valid_accuracy = sess.run(accuracy, feed_dict={x: valid_x, y_: valid_y})
test_accuracy = sess.run(accuracy, feed_dict={x: test_x, y_: test_y})
train_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
batch_xs, batch_ys = get_train(M)
# Save the trained weights and biases for part 4
if i == 99:
snapshot = {}
snapshot["W0"] = sess.run(W0)
snapshot["W1"] = sess.run(W1)
snapshot["b0"] = sess.run(b0)
snapshot["b1"] = sess.run(b1)
cPickle.dump(snapshot, open("new_snapshot"+str(i)+".pkl", "w"))
# Save parameters for plotting
trainCR = np.append(trainCR, train_accuracy)
validCR = np.append(validCR, valid_accuracy)
testCR = np.append(testCR, test_accuracy)
h = np.append(h, i)
# print "The final performance classification on the test set is: ", test_accuracy
# plt.plot(h, trainCR, 'r', label = "training set")
# plt.plot(h, validCR, 'g', label = "validation set")
# plt.plot(h, testCR, 'b', label = "test set")
# plt.title('Correct classification rate vs Iterations')
# plt.xlabel('Number of Iterations')
# plt.ylabel('Correct classification rate')
# plt.legend(loc='lower right')
# plt.show()
################################################################################
# Call create M for actor, change actor's name to see results for actor
actor = 'Gerard Butler'
create_M_for_actor(actor, test_dir)
M_actor = loadmat(actor+'.mat')
for i in range(6):
if act[i] == actor:
break
# Call test x function to get test_actorx, test_actory
test_actorx, test_actory = get_test_for_actor(M_actor, actor)
#Output:
# Feed the session with test actor x and its targets
output = sess.run(y, feed_dict={x: test_actorx, y_: test_actory})
inds = argsort(output)[0,:]
for i in range(6):
print class_names[inds[-1-i]], output[0, inds[-1-i]]
| create_new_input | identifier_name |
action.py | import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms
from typing import Union, List, Tuple, TypeVar, Callable, NewType, Optional
from func_helper import pip
import func_helper.func_helper.iterator as it
DataSource = Union[dict, pd.DataFrame, pd.Series]
Ax = plt.subplot
AxPlot = Callable[[Ax], Ax]
PlotAction = Callable[..., AxPlot]
Selector = Optional[Union[str, Callable[[DataSource], DataSource]]]
LiteralOrSequence = Optional[Union[int,float,str,list,tuple,DataSource]]
LiteralOrSequencer = Optional[Union[LiteralOrSequence, Callable[[DataSource], DataSource]]]
def plot_action(plotter: PlotAction, arg_names, default_kwargs={}):
"""
Generate plot action by hashable object and some parameters, which takes
matplotlib.pyplot.Axes.subplot and return it.
When some parameters are given as list, duplicate the other parameters
and make multiple plots.
Parameters
----------
plotter: *arg,**kwargs -> ax -> ax
default: dict
Return
------
callable: (kwargs -> df, dict, kwargs) -> (ax -> ax)
"""
arg_filter = get_values_by_keys(["data"]+arg_names, None)
kwarg_filter = filter_dict(default_kwargs.keys())
def presetting(setting={}, **setting_kwargs):
def set_data(data_source: DataSource, option: dict={}, **option_kwargs):
"""
Parameters
----------
df: pandas.DataFrame | dict
option: dict, optional
{
"x" : "x_name",
"y" : ["y1", "y2"],
"ylim" : (None,10),
"ylabel" : "Y",
"linewidth" : [1,1.5]
}
kwargs: parameters corresponding to items of option.
"""
list_of_entry = to_flatlist(
{"data":data_source,**default_kwargs, **setting, **setting_kwargs, **option, **option_kwargs})
# print(list_of_entry)
arg_and_kwarg=generate_arg_and_kwags()(
#as_DataFrame(data_source),
#data_source,
list(map(arg_filter, list_of_entry)),
list(map(kwarg_filter, list_of_entry))
)
# return plot action
return lambda ax: it.reducing(
lambda acc, e: plotter(*e[0], **e[1])(acc))(ax)(arg_and_kwarg)
return set_data
return presetting
def as_DataFrame(d: DataSource) -> pd.DataFrame:
if type(d) in [pd.DataFrame, pd.Series]:
return d
elif type(d) in [list, dict]:
return pd.DataFrame(d)
else:
raise TypeError(f"{type(d)} is not available for data source.")
def generate_arg_and_kwags():
"""
Setup positional arguments and keyword arguments for plotter.
"""
def gen_func(
#df: DataSource,
option: List[list],
style: List[dict]
)->List[Tuple[list, dict]]:
if len(option) != len(style):
raise SystemError("option and style must be same size list.")
arg_and_kwarg = []
for o, s in zip(option, style):
arg = [*o]
kwargs = s
arg_and_kwarg.append((arg, kwargs))
return arg_and_kwarg
return gen_func
def get_subset(use_index=True)\
->Callable[[DataSource,Selector], DataSource]:
"""
"""
def f(df: DataSource, k:Selector)->DataSource:
"""
Select value in hashable (pandas.DataFrame, dict, etc.)
"""
if type(df) is pd.DataFrame:
if k in ["index", None]:
return df.index
elif type(k) is str:
return df[k]
elif callable(k):
return k(df)
else:
return df[k]
elif type(df) is pd.Series:
if k in ["index", None]:
return df.index
elif callable(k):
return k(df)
else:
return df
elif type(df) is dict:
if type(k) is str:
return df.get(k,[])
elif callable(k):
return k(df)
else:
return df
else:
#print(df)
raise TypeError("df must be pandas.DataFrame or pandas.Series.")
return f
def get_literal_or_series(input:LiteralOrSequencer, df: DataSource)->LiteralOrSequence:
if callable(input):
return input(df)
else:
return input
def get_value(default=""):
def f(_, k, v):
"""
Return value.
"""
return v if v is not None else default
return f
def is_iterable(o):
return type(o) in [list, tuple]
def to_flatlist(d: dict) -> List[dict]:
"""
Usage
-----
d = {
"x" : (0,1,2),
"y" : [1,2],
"z" : 0
}
to_flatlist(d) is...
[
{"x" : 0, "y" : [1,2], "z" : 0},
{"x" : 1, "y" : [1,2], "z" : 0},
{"x" : 2, "y" : [1,2], "z" : 0}
]
"""
def value_to_list(d: dict) -> dict:
return dict(it.mapping(
lambda kv: (kv[0], kv[1]) if type(
kv[1]) is tuple else (kv[0], [kv[1]])
)(d.items()))
list_dict = value_to_list(d)
max_length = it.reducing(
lambda acc, e: acc if acc >= len(e) else len(e)
)(0)(list_dict.values())
flatlist = []
for i in range(max_length):
new_dict = {}
for k in list_dict.keys():
if len(list_dict[k]) >= i+1:
new_dict.update({(k): list_dict[k][i]})
else:
new_dict.update({(k): list_dict[k][-1]})
flatlist.append(new_dict)
return flatlist
def filter_dict(k: list) -> Callable[[dict], dict]:
return lambda d: dict(
filter(lambda kv: (kv[0] in k) and kv[1] is not None, d.items())
)
def translate_table(table_dict: dict):
return lambda d: {table_dict.get(k, k): v for k, v in d.items()}
def get_values_by_keys(k: list, default=None)->Callable[[dict], list]:
"""
Filter dictionary by list of keys.
Parameters
----------
k: list
default: any, optional
Set as default value for key not in dict.
Default value is None
"""
return lambda d: list(map(lambda key: d.get(key, default), k))
def Iget_factor(
df: pd.DataFrame,
f: Union[str, Callable[[pd.DataFrame], pd.Series]],
factor: Optional[Union[list, Callable[[pd.DataFrame], pd.Series]]]
)->Tuple[pd.Series, list]:
d = f(df) if callable(f) else df[f]
if type(factor) is list:
return (d, factor)
elif callable(factor):
return factor(d)
else:
return (d, d.astype('category').cat.categories)
def selector_or_literal(df, s):
if s is None:
return df.index
elif callable(s):
return s(df)
elif type(s) is list:
return s
elif type(s) in [int, float]:
return [s]
elif s in df:
return df[s]
else:
return df.index
def Icoordinate_transform(ax, xcoordinate: Optional[str], ycoordinate: Optional[str]):
|
default_kwargs = {}
_tick_params_each = {
"labelsize": 12,
"rotation": 0,
"which": "both",
"direction": "in",
"color": "black",
"labelcolor": "black"
}
_tick_params_kwargs = {
**_tick_params_each,
"labelbottom": None,
"labelleft": None,
"labeltop": None,
"labelright": None,
"bottom": None,
"left": None,
"top": None,
"right": None
}
_label_kwargs = {
"alpha": 1,
"color": "black",
"family": ["Noto Sans CJK JP", "sans-serif"],
# "fontname" : "sans-serif",
"fontsize": 16,
"fontstyle": "normal",
}
_line2d_kwargs = {
"alpha": 1,
"marker": "",
"markeredgecolor": None,
"markeredgewidth": None,
"markerfacecolor": None,
"markerfacecoloralt": None,
"markersize": None,
"linestyle": None,
"linewidth": None,
"color": None,
}
_grid_kwargs:dict = {
"axis": None,
**_line2d_kwargs,
"color": 'gray',
"linestyle": ':',
"linewidth": 1,
}
_line_kwargs = {
**_line2d_kwargs,
"linestyle": "-",
"linewidth": 1,
}
_vhlines_kwargs = {
"color": None,
"linestyle": "-",
"linewidth": 1,
"alpha": 1
}
_scatter_kwargs = {
"c": None,
"s": None,
"cmap": None,
"norm": None,
"vmin": None,
"vmax": None,
"alpha": 1,
"marker": "o",
"edgecolors": "face",
"linewidth": None,
"linestyle": "-"
}
_fill_kwargs = {
"color": "green",
"alpha": 0.5,
"hatch": None
}
_quiver_kwargs = {
"scale": 1,
"scale_units": "dots",
"alpha": 1,
"color": "black",
"width": 1,
"headwidth": 0.1,
"headlength": 0.2
}
_axline_kwargs = {
**_line2d_kwargs,
"alpha": 0.5,
"color": "green",
"linewidth": None,
"linestyle": "-"
}
_box_kwargs = {
"vert": True,
"notch": False,
"sym": None, # Symbol setting for out lier
"whis": 1.5,
"bootstrap": None,
"usermedians": None,
"conf_intervals": None,
"widths": 0.5,
"patch_artist": False,
"manage_xticks": True,
"autorange": False,
"meanline": False,
"zorder": None,
"showcaps": True,
"showbox": True,
"showfliers": True,
"showmeans": False,
"capprops": None,
"boxprops": None,
"whiskerprops": None,
"flierprops": None,
"medianprops": None,
"meanprops": None
}
_violin_kwargs = {
"vert": True,
"widths": 0.5,
"showmeans": False,
"showextrema": True,
"showmedians": False,
"points": 100,
"bw_method": None,
"scale": "width", # "width" | "count"
"bodies": None,
"cmeans": None
}
"""
https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.violin.html
bodies:{
"facecolor" : "#2196f3",
"edgecolor" : "#005588",
"alpha" : 0.5
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.PolyCollection
cmeans:{
"edgecolor",
"linestyle",
"linewidth",
"alpha"
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.LineCollection
"""
_text_kwargs = {
"ha": 'left',
"va": 'bottom',
"color": "black",
"family": None,
"fontsize": 10,
"rotation": None,
"style": None,
"xcoordinate": None, # "data" = None | "axes"
"ycoordinate": None, # "data" = None | "axes"
"wrap": False
}
_hist_kwargs = {
"bins": None,
"range": None,
"density": None,
"weights": None,
"cumulative": False,
"bottom": None,
"histtype": 'bar',
"align": 'mid',
"orientation": 'vertical',
"rwidth": None,
"log": False,
"color": "#2196f3",
"label": None,
"stacked": False,
"normed": None,
}
_bar_kwargs = {
"norm": False,
"vert": True,
# "width": 0.8,
"align": "center",
}
default_kwargs.update({
"tick_params_each": _tick_params_each,
"tick_params": _tick_params_kwargs,
"axis_label": _label_kwargs,
"grid": _grid_kwargs,
"line": _line_kwargs,
"vlines": _vhlines_kwargs,
"hlines": _vhlines_kwargs,
"scatter": _scatter_kwargs,
"fill": _fill_kwargs,
"quiver": _quiver_kwargs,
"axline": _axline_kwargs,
"box": _box_kwargs,
"violin": _violin_kwargs,
"text": _text_kwargs,
"hist": _hist_kwargs,
"bar": _bar_kwargs,
})
def _annotate_plotter(df, from_pos, to_pos, text, *arg, textdict={}, **kwargs) -> AxPlot:
def plot(ax):
return ax
return plot
def annotate(**presetting):
return plot_action(
_annotate_plotter,
["from_pos", "to_pos", "text"],
{**_quiver_kwargs, "textdict": _text_kwargs}
)(**presetting)
| """
Select coordinate transform method for x and y axis.
"""
return matplotlib.transforms.blended_transform_factory(
ax.transAxes if xcoordinate is "axes" else ax.transData,
ax.transAxes if ycoordinate is "axes" else ax.transData
) | identifier_body |
action.py | import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms
from typing import Union, List, Tuple, TypeVar, Callable, NewType, Optional
from func_helper import pip
import func_helper.func_helper.iterator as it
DataSource = Union[dict, pd.DataFrame, pd.Series]
Ax = plt.subplot
AxPlot = Callable[[Ax], Ax]
PlotAction = Callable[..., AxPlot]
Selector = Optional[Union[str, Callable[[DataSource], DataSource]]]
LiteralOrSequence = Optional[Union[int,float,str,list,tuple,DataSource]]
LiteralOrSequencer = Optional[Union[LiteralOrSequence, Callable[[DataSource], DataSource]]]
def plot_action(plotter: PlotAction, arg_names, default_kwargs={}):
"""
Generate plot action by hashable object and some parameters, which takes
matplotlib.pyplot.Axes.subplot and return it.
When some parameters are given as list, duplicate the other parameters
and make multiple plots.
Parameters
----------
plotter: *arg,**kwargs -> ax -> ax
default: dict
Return
------
callable: (kwargs -> df, dict, kwargs) -> (ax -> ax)
"""
arg_filter = get_values_by_keys(["data"]+arg_names, None)
kwarg_filter = filter_dict(default_kwargs.keys())
def presetting(setting={}, **setting_kwargs):
def set_data(data_source: DataSource, option: dict={}, **option_kwargs):
"""
Parameters
----------
df: pandas.DataFrame | dict
option: dict, optional
{
"x" : "x_name",
"y" : ["y1", "y2"],
"ylim" : (None,10),
"ylabel" : "Y",
"linewidth" : [1,1.5]
}
kwargs: parameters corresponding to items of option.
"""
list_of_entry = to_flatlist(
{"data":data_source,**default_kwargs, **setting, **setting_kwargs, **option, **option_kwargs})
# print(list_of_entry)
arg_and_kwarg=generate_arg_and_kwags()(
#as_DataFrame(data_source),
#data_source,
list(map(arg_filter, list_of_entry)),
list(map(kwarg_filter, list_of_entry))
)
# return plot action
return lambda ax: it.reducing(
lambda acc, e: plotter(*e[0], **e[1])(acc))(ax)(arg_and_kwarg)
return set_data
return presetting
def as_DataFrame(d: DataSource) -> pd.DataFrame:
if type(d) in [pd.DataFrame, pd.Series]:
|
elif type(d) in [list, dict]:
return pd.DataFrame(d)
else:
raise TypeError(f"{type(d)} is not available for data source.")
def generate_arg_and_kwags():
"""
Setup positional arguments and keyword arguments for plotter.
"""
def gen_func(
#df: DataSource,
option: List[list],
style: List[dict]
)->List[Tuple[list, dict]]:
if len(option) != len(style):
raise SystemError("option and style must be same size list.")
arg_and_kwarg = []
for o, s in zip(option, style):
arg = [*o]
kwargs = s
arg_and_kwarg.append((arg, kwargs))
return arg_and_kwarg
return gen_func
def get_subset(use_index=True)\
->Callable[[DataSource,Selector], DataSource]:
"""
"""
def f(df: DataSource, k:Selector)->DataSource:
"""
Select value in hashable (pandas.DataFrame, dict, etc.)
"""
if type(df) is pd.DataFrame:
if k in ["index", None]:
return df.index
elif type(k) is str:
return df[k]
elif callable(k):
return k(df)
else:
return df[k]
elif type(df) is pd.Series:
if k in ["index", None]:
return df.index
elif callable(k):
return k(df)
else:
return df
elif type(df) is dict:
if type(k) is str:
return df.get(k,[])
elif callable(k):
return k(df)
else:
return df
else:
#print(df)
raise TypeError("df must be pandas.DataFrame or pandas.Series.")
return f
def get_literal_or_series(input:LiteralOrSequencer, df: DataSource)->LiteralOrSequence:
if callable(input):
return input(df)
else:
return input
def get_value(default=""):
def f(_, k, v):
"""
Return value.
"""
return v if v is not None else default
return f
def is_iterable(o):
return type(o) in [list, tuple]
def to_flatlist(d: dict) -> List[dict]:
"""
Usage
-----
d = {
"x" : (0,1,2),
"y" : [1,2],
"z" : 0
}
to_flatlist(d) is...
[
{"x" : 0, "y" : [1,2], "z" : 0},
{"x" : 1, "y" : [1,2], "z" : 0},
{"x" : 2, "y" : [1,2], "z" : 0}
]
"""
def value_to_list(d: dict) -> dict:
return dict(it.mapping(
lambda kv: (kv[0], kv[1]) if type(
kv[1]) is tuple else (kv[0], [kv[1]])
)(d.items()))
list_dict = value_to_list(d)
max_length = it.reducing(
lambda acc, e: acc if acc >= len(e) else len(e)
)(0)(list_dict.values())
flatlist = []
for i in range(max_length):
new_dict = {}
for k in list_dict.keys():
if len(list_dict[k]) >= i+1:
new_dict.update({(k): list_dict[k][i]})
else:
new_dict.update({(k): list_dict[k][-1]})
flatlist.append(new_dict)
return flatlist
def filter_dict(k: list) -> Callable[[dict], dict]:
return lambda d: dict(
filter(lambda kv: (kv[0] in k) and kv[1] is not None, d.items())
)
def translate_table(table_dict: dict):
return lambda d: {table_dict.get(k, k): v for k, v in d.items()}
def get_values_by_keys(k: list, default=None)->Callable[[dict], list]:
"""
Filter dictionary by list of keys.
Parameters
----------
k: list
default: any, optional
Set as default value for key not in dict.
Default value is None
"""
return lambda d: list(map(lambda key: d.get(key, default), k))
def Iget_factor(
df: pd.DataFrame,
f: Union[str, Callable[[pd.DataFrame], pd.Series]],
factor: Optional[Union[list, Callable[[pd.DataFrame], pd.Series]]]
)->Tuple[pd.Series, list]:
d = f(df) if callable(f) else df[f]
if type(factor) is list:
return (d, factor)
elif callable(factor):
return factor(d)
else:
return (d, d.astype('category').cat.categories)
def selector_or_literal(df, s):
if s is None:
return df.index
elif callable(s):
return s(df)
elif type(s) is list:
return s
elif type(s) in [int, float]:
return [s]
elif s in df:
return df[s]
else:
return df.index
def Icoordinate_transform(ax, xcoordinate: Optional[str], ycoordinate: Optional[str]):
    """
    Select coordinate transform method for x and y axis.

    Parameters
    ----------
    ax: matplotlib Axes supplying the transforms.
    xcoordinate, ycoordinate: "axes" selects axes-fraction coordinates
        (ax.transAxes); any other value (including None) selects data
        coordinates (ax.transData).

    Returns
    -------
    A blended transform combining the selected x and y transforms.
    """
    # Use == for string comparison: `is "axes"` compared object identity,
    # which is implementation-dependent for string literals and emits a
    # SyntaxWarning on CPython >= 3.8.
    return matplotlib.transforms.blended_transform_factory(
        ax.transAxes if xcoordinate == "axes" else ax.transData,
        ax.transAxes if ycoordinate == "axes" else ax.transData
    )
default_kwargs = {}
_tick_params_each = {
"labelsize": 12,
"rotation": 0,
"which": "both",
"direction": "in",
"color": "black",
"labelcolor": "black"
}
_tick_params_kwargs = {
**_tick_params_each,
"labelbottom": None,
"labelleft": None,
"labeltop": None,
"labelright": None,
"bottom": None,
"left": None,
"top": None,
"right": None
}
_label_kwargs = {
"alpha": 1,
"color": "black",
"family": ["Noto Sans CJK JP", "sans-serif"],
# "fontname" : "sans-serif",
"fontsize": 16,
"fontstyle": "normal",
}
_line2d_kwargs = {
"alpha": 1,
"marker": "",
"markeredgecolor": None,
"markeredgewidth": None,
"markerfacecolor": None,
"markerfacecoloralt": None,
"markersize": None,
"linestyle": None,
"linewidth": None,
"color": None,
}
_grid_kwargs:dict = {
"axis": None,
**_line2d_kwargs,
"color": 'gray',
"linestyle": ':',
"linewidth": 1,
}
_line_kwargs = {
**_line2d_kwargs,
"linestyle": "-",
"linewidth": 1,
}
_vhlines_kwargs = {
"color": None,
"linestyle": "-",
"linewidth": 1,
"alpha": 1
}
_scatter_kwargs = {
"c": None,
"s": None,
"cmap": None,
"norm": None,
"vmin": None,
"vmax": None,
"alpha": 1,
"marker": "o",
"edgecolors": "face",
"linewidth": None,
"linestyle": "-"
}
_fill_kwargs = {
"color": "green",
"alpha": 0.5,
"hatch": None
}
_quiver_kwargs = {
"scale": 1,
"scale_units": "dots",
"alpha": 1,
"color": "black",
"width": 1,
"headwidth": 0.1,
"headlength": 0.2
}
_axline_kwargs = {
**_line2d_kwargs,
"alpha": 0.5,
"color": "green",
"linewidth": None,
"linestyle": "-"
}
_box_kwargs = {
"vert": True,
"notch": False,
"sym": None, # Symbol setting for out lier
"whis": 1.5,
"bootstrap": None,
"usermedians": None,
"conf_intervals": None,
"widths": 0.5,
"patch_artist": False,
"manage_xticks": True,
"autorange": False,
"meanline": False,
"zorder": None,
"showcaps": True,
"showbox": True,
"showfliers": True,
"showmeans": False,
"capprops": None,
"boxprops": None,
"whiskerprops": None,
"flierprops": None,
"medianprops": None,
"meanprops": None
}
_violin_kwargs = {
"vert": True,
"widths": 0.5,
"showmeans": False,
"showextrema": True,
"showmedians": False,
"points": 100,
"bw_method": None,
"scale": "width", # "width" | "count"
"bodies": None,
"cmeans": None
}
"""
https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.violin.html
bodies:{
"facecolor" : "#2196f3",
"edgecolor" : "#005588",
"alpha" : 0.5
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.PolyCollection
cmeans:{
"edgecolor",
"linestyle",
"linewidth",
"alpha"
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.LineCollection
"""
_text_kwargs = {
"ha": 'left',
"va": 'bottom',
"color": "black",
"family": None,
"fontsize": 10,
"rotation": None,
"style": None,
"xcoordinate": None, # "data" = None | "axes"
"ycoordinate": None, # "data" = None | "axes"
"wrap": False
}
_hist_kwargs = {
"bins": None,
"range": None,
"density": None,
"weights": None,
"cumulative": False,
"bottom": None,
"histtype": 'bar',
"align": 'mid',
"orientation": 'vertical',
"rwidth": None,
"log": False,
"color": "#2196f3",
"label": None,
"stacked": False,
"normed": None,
}
_bar_kwargs = {
"norm": False,
"vert": True,
# "width": 0.8,
"align": "center",
}
default_kwargs.update({
"tick_params_each": _tick_params_each,
"tick_params": _tick_params_kwargs,
"axis_label": _label_kwargs,
"grid": _grid_kwargs,
"line": _line_kwargs,
"vlines": _vhlines_kwargs,
"hlines": _vhlines_kwargs,
"scatter": _scatter_kwargs,
"fill": _fill_kwargs,
"quiver": _quiver_kwargs,
"axline": _axline_kwargs,
"box": _box_kwargs,
"violin": _violin_kwargs,
"text": _text_kwargs,
"hist": _hist_kwargs,
"bar": _bar_kwargs,
})
def _annotate_plotter(df, from_pos, to_pos, text, *arg, textdict={}, **kwargs) -> AxPlot:
    # NOTE(review): stub — draws nothing and returns the Axes unchanged.
    # Presumably an arrow (quiver kwargs) plus text (textdict) annotation is
    # intended here; confirm before relying on this action.
    # The mutable default textdict={} is shared across calls; it is never
    # mutated here, but keep that in mind when implementing.
    def plot(ax):
        return ax
    return plot
def annotate(**presetting):
    # Build a plot action whose positional options are "from_pos", "to_pos"
    # and "text", styled with the quiver kwargs plus a nested "textdict" of
    # text kwargs. Delegates to _annotate_plotter (currently a stub).
    return plot_action(
        _annotate_plotter,
        ["from_pos", "to_pos", "text"],
        {**_quiver_kwargs, "textdict": _text_kwargs}
    )(**presetting)
| return d | conditional_block |
action.py | import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms
from typing import Union, List, Tuple, TypeVar, Callable, NewType, Optional
from func_helper import pip
import func_helper.func_helper.iterator as it
DataSource = Union[dict, pd.DataFrame, pd.Series]
Ax = plt.subplot
AxPlot = Callable[[Ax], Ax]
PlotAction = Callable[..., AxPlot]
Selector = Optional[Union[str, Callable[[DataSource], DataSource]]]
LiteralOrSequence = Optional[Union[int,float,str,list,tuple,DataSource]]
LiteralOrSequencer = Optional[Union[LiteralOrSequence, Callable[[DataSource], DataSource]]]
def plot_action(plotter: PlotAction, arg_names, default_kwargs={}):
"""
Generate plot action by hashable object and some parameters, which takes
matplotlib.pyplot.Axes.subplot and return it.
When some parameters are given as list, duplicate the other parameters
and make multiple plots.
Parameters
----------
plotter: *arg,**kwargs -> ax -> ax
default: dict
Return
------
callable: (kwargs -> df, dict, kwargs) -> (ax -> ax)
""" | kwarg_filter = filter_dict(default_kwargs.keys())
def presetting(setting={}, **setting_kwargs):
def set_data(data_source: DataSource, option: dict={}, **option_kwargs):
"""
Parameters
----------
df: pandas.DataFrame | dict
option: dict, optional
{
"x" : "x_name",
"y" : ["y1", "y2"],
"ylim" : (None,10),
"ylabel" : "Y",
"linewidth" : [1,1.5]
}
kwargs: parameters corresponding to items of option.
"""
list_of_entry = to_flatlist(
{"data":data_source,**default_kwargs, **setting, **setting_kwargs, **option, **option_kwargs})
# print(list_of_entry)
arg_and_kwarg=generate_arg_and_kwags()(
#as_DataFrame(data_source),
#data_source,
list(map(arg_filter, list_of_entry)),
list(map(kwarg_filter, list_of_entry))
)
# return plot action
return lambda ax: it.reducing(
lambda acc, e: plotter(*e[0], **e[1])(acc))(ax)(arg_and_kwarg)
return set_data
return presetting
def as_DataFrame(d: DataSource) -> pd.DataFrame:
    """Coerce supported input into a pandas object.

    DataFrame and Series pass through untouched; list and dict are wrapped
    in a new DataFrame; any other type is rejected. Note the exact-type
    check: subclasses of list/dict are not accepted.
    """
    kind = type(d)
    if kind in (pd.DataFrame, pd.Series):
        return d
    if kind in (list, dict):
        return pd.DataFrame(d)
    raise TypeError(f"{type(d)} is not available for data source.")
def generate_arg_and_kwags():
    """
    Setup positional arguments and keyword arguments for plotter.

    Returns a function pairing each positional-argument list with its
    keyword-argument dict; both input lists must have the same length.
    """
    def pair_up(
        option: List[list],
        style: List[dict]
    ) -> List[Tuple[list, dict]]:
        if len(option) != len(style):
            # Kept as SystemError for backward compatibility with existing
            # callers, though ValueError would be more conventional.
            raise SystemError("option and style must be same size list.")
        # Copy each positional list so callers can't mutate the input.
        return [(list(args), kwargs) for args, kwargs in zip(option, style)]
    return pair_up
def get_subset(use_index=True)\
    ->Callable[[DataSource,Selector], DataSource]:
    """
    Build an accessor that selects a column/series from a data source.

    Parameters
    ----------
    use_index: bool, optional
        Currently unused by the implementation; kept for interface
        compatibility.
    """
    def f(df: DataSource, k:Selector)->DataSource:
        """
        Select value in hashable (pandas.DataFrame, dict, etc.)
        """
        if type(df) is pd.DataFrame:
            # "index" (or no selector) yields the frame's index itself.
            if k in ["index", None]:
                return df.index
            elif type(k) is str:
                return df[k]
            elif callable(k):
                return k(df)
            else:
                # Non-string keys still go through item access.
                return df[k]
        elif type(df) is pd.Series:
            if k in ["index", None]:
                return df.index
            elif callable(k):
                return k(df)
            else:
                # Any other selector returns the series unchanged.
                return df
        elif type(df) is dict:
            if type(k) is str:
                # Missing keys fall back to an empty list.
                return df.get(k,[])
            elif callable(k):
                return k(df)
            else:
                return df
        else:
            #print(df)
            raise TypeError("df must be pandas.DataFrame or pandas.Series.")
    return f
def get_literal_or_series(input:LiteralOrSequencer, df: DataSource)->LiteralOrSequence:
    # A callable "sequencer" is resolved against the data source; any other
    # value (literal or sequence) is passed through unchanged.
    if callable(input):
        return input(df)
    else:
        return input
def get_value(default=""):
def f(_, k, v):
"""
Return value.
"""
return v if v is not None else default
return f
def is_iterable(o):
return type(o) in [list, tuple]
def to_flatlist(d: dict) -> List[dict]:
"""
Usage
-----
d = {
"x" : (0,1,2),
"y" : [1,2],
"z" : 0
}
to_flatlist(d) is...
[
{"x" : 0, "y" : [1,2], "z" : 0},
{"x" : 1, "y" : [1,2], "z" : 0},
{"x" : 2, "y" : [1,2], "z" : 0}
]
"""
def value_to_list(d: dict) -> dict:
return dict(it.mapping(
lambda kv: (kv[0], kv[1]) if type(
kv[1]) is tuple else (kv[0], [kv[1]])
)(d.items()))
list_dict = value_to_list(d)
max_length = it.reducing(
lambda acc, e: acc if acc >= len(e) else len(e)
)(0)(list_dict.values())
flatlist = []
for i in range(max_length):
new_dict = {}
for k in list_dict.keys():
if len(list_dict[k]) >= i+1:
new_dict.update({(k): list_dict[k][i]})
else:
new_dict.update({(k): list_dict[k][-1]})
flatlist.append(new_dict)
return flatlist
def filter_dict(k: list) -> Callable[[dict], dict]:
return lambda d: dict(
filter(lambda kv: (kv[0] in k) and kv[1] is not None, d.items())
)
def translate_table(table_dict: dict):
return lambda d: {table_dict.get(k, k): v for k, v in d.items()}
def get_values_by_keys(k: list, default=None)->Callable[[dict], list]:
"""
Filter dictionary by list of keys.
Parameters
----------
k: list
default: any, optional
Set as default value for key not in dict.
Default value is None
"""
return lambda d: list(map(lambda key: d.get(key, default), k))
def Iget_factor(
df: pd.DataFrame,
f: Union[str, Callable[[pd.DataFrame], pd.Series]],
factor: Optional[Union[list, Callable[[pd.DataFrame], pd.Series]]]
)->Tuple[pd.Series, list]:
d = f(df) if callable(f) else df[f]
if type(factor) is list:
return (d, factor)
elif callable(factor):
return factor(d)
else:
return (d, d.astype('category').cat.categories)
def selector_or_literal(df, s):
if s is None:
return df.index
elif callable(s):
return s(df)
elif type(s) is list:
return s
elif type(s) in [int, float]:
return [s]
elif s in df:
return df[s]
else:
return df.index
def Icoordinate_transform(ax, xcoordinate: Optional[str], ycoordinate: Optional[str]):
    """
    Select coordinate transform method for x and y axis.

    Parameters
    ----------
    ax: matplotlib Axes supplying the transforms.
    xcoordinate, ycoordinate: "axes" selects axes-fraction coordinates
        (ax.transAxes); any other value (including None) selects data
        coordinates (ax.transData).

    Returns
    -------
    A blended transform combining the selected x and y transforms.
    """
    # Use == for string comparison: `is "axes"` compared object identity,
    # which is implementation-dependent for string literals and emits a
    # SyntaxWarning on CPython >= 3.8.
    return matplotlib.transforms.blended_transform_factory(
        ax.transAxes if xcoordinate == "axes" else ax.transData,
        ax.transAxes if ycoordinate == "axes" else ax.transData
    )
default_kwargs = {}
_tick_params_each = {
"labelsize": 12,
"rotation": 0,
"which": "both",
"direction": "in",
"color": "black",
"labelcolor": "black"
}
_tick_params_kwargs = {
**_tick_params_each,
"labelbottom": None,
"labelleft": None,
"labeltop": None,
"labelright": None,
"bottom": None,
"left": None,
"top": None,
"right": None
}
_label_kwargs = {
"alpha": 1,
"color": "black",
"family": ["Noto Sans CJK JP", "sans-serif"],
# "fontname" : "sans-serif",
"fontsize": 16,
"fontstyle": "normal",
}
_line2d_kwargs = {
"alpha": 1,
"marker": "",
"markeredgecolor": None,
"markeredgewidth": None,
"markerfacecolor": None,
"markerfacecoloralt": None,
"markersize": None,
"linestyle": None,
"linewidth": None,
"color": None,
}
_grid_kwargs:dict = {
"axis": None,
**_line2d_kwargs,
"color": 'gray',
"linestyle": ':',
"linewidth": 1,
}
_line_kwargs = {
**_line2d_kwargs,
"linestyle": "-",
"linewidth": 1,
}
_vhlines_kwargs = {
"color": None,
"linestyle": "-",
"linewidth": 1,
"alpha": 1
}
_scatter_kwargs = {
"c": None,
"s": None,
"cmap": None,
"norm": None,
"vmin": None,
"vmax": None,
"alpha": 1,
"marker": "o",
"edgecolors": "face",
"linewidth": None,
"linestyle": "-"
}
_fill_kwargs = {
"color": "green",
"alpha": 0.5,
"hatch": None
}
_quiver_kwargs = {
"scale": 1,
"scale_units": "dots",
"alpha": 1,
"color": "black",
"width": 1,
"headwidth": 0.1,
"headlength": 0.2
}
_axline_kwargs = {
**_line2d_kwargs,
"alpha": 0.5,
"color": "green",
"linewidth": None,
"linestyle": "-"
}
_box_kwargs = {
"vert": True,
"notch": False,
"sym": None, # Symbol setting for out lier
"whis": 1.5,
"bootstrap": None,
"usermedians": None,
"conf_intervals": None,
"widths": 0.5,
"patch_artist": False,
"manage_xticks": True,
"autorange": False,
"meanline": False,
"zorder": None,
"showcaps": True,
"showbox": True,
"showfliers": True,
"showmeans": False,
"capprops": None,
"boxprops": None,
"whiskerprops": None,
"flierprops": None,
"medianprops": None,
"meanprops": None
}
_violin_kwargs = {
"vert": True,
"widths": 0.5,
"showmeans": False,
"showextrema": True,
"showmedians": False,
"points": 100,
"bw_method": None,
"scale": "width", # "width" | "count"
"bodies": None,
"cmeans": None
}
"""
https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.violin.html
bodies:{
"facecolor" : "#2196f3",
"edgecolor" : "#005588",
"alpha" : 0.5
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.PolyCollection
cmeans:{
"edgecolor",
"linestyle",
"linewidth",
"alpha"
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.LineCollection
"""
_text_kwargs = {
"ha": 'left',
"va": 'bottom',
"color": "black",
"family": None,
"fontsize": 10,
"rotation": None,
"style": None,
"xcoordinate": None, # "data" = None | "axes"
"ycoordinate": None, # "data" = None | "axes"
"wrap": False
}
_hist_kwargs = {
"bins": None,
"range": None,
"density": None,
"weights": None,
"cumulative": False,
"bottom": None,
"histtype": 'bar',
"align": 'mid',
"orientation": 'vertical',
"rwidth": None,
"log": False,
"color": "#2196f3",
"label": None,
"stacked": False,
"normed": None,
}
_bar_kwargs = {
"norm": False,
"vert": True,
# "width": 0.8,
"align": "center",
}
default_kwargs.update({
"tick_params_each": _tick_params_each,
"tick_params": _tick_params_kwargs,
"axis_label": _label_kwargs,
"grid": _grid_kwargs,
"line": _line_kwargs,
"vlines": _vhlines_kwargs,
"hlines": _vhlines_kwargs,
"scatter": _scatter_kwargs,
"fill": _fill_kwargs,
"quiver": _quiver_kwargs,
"axline": _axline_kwargs,
"box": _box_kwargs,
"violin": _violin_kwargs,
"text": _text_kwargs,
"hist": _hist_kwargs,
"bar": _bar_kwargs,
})
def _annotate_plotter(df, from_pos, to_pos, text, *arg, textdict={}, **kwargs) -> AxPlot:
def plot(ax):
return ax
return plot
def annotate(**presetting):
return plot_action(
_annotate_plotter,
["from_pos", "to_pos", "text"],
{**_quiver_kwargs, "textdict": _text_kwargs}
)(**presetting) | arg_filter = get_values_by_keys(["data"]+arg_names, None) | random_line_split |
action.py | import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms
from typing import Union, List, Tuple, TypeVar, Callable, NewType, Optional
from func_helper import pip
import func_helper.func_helper.iterator as it
DataSource = Union[dict, pd.DataFrame, pd.Series]
Ax = plt.subplot
AxPlot = Callable[[Ax], Ax]
PlotAction = Callable[..., AxPlot]
Selector = Optional[Union[str, Callable[[DataSource], DataSource]]]
LiteralOrSequence = Optional[Union[int,float,str,list,tuple,DataSource]]
LiteralOrSequencer = Optional[Union[LiteralOrSequence, Callable[[DataSource], DataSource]]]
def plot_action(plotter: PlotAction, arg_names, default_kwargs={}):
    """
    Generate plot action by hashable object and some parameters, which takes
    matplotlib.pyplot.Axes.subplot and return it.

    When some parameters are given as list, duplicate the other parameters
    and make multiple plots.

    Parameters
    ----------
    plotter: *arg,**kwargs -> ax -> ax
    arg_names: list of str
        Names of the positional options looked up in the merged settings.
    default_kwargs: dict
        Default styling; its keys also act as the whitelist of keyword
        arguments forwarded to the plotter.

    Return
    ------
    callable: (kwargs -> df, dict, kwargs) -> (ax -> ax)
    """
    # NOTE(review): default_kwargs={} is a shared mutable default; it is
    # only read here, but confirm no caller mutates it.
    arg_filter = get_values_by_keys(["data"]+arg_names, None)
    kwarg_filter = filter_dict(default_kwargs.keys())
    def presetting(setting={}, **setting_kwargs):
        def set_data(data_source: DataSource, option: dict={}, **option_kwargs):
            """
            Parameters
            ----------
            df: pandas.DataFrame | dict
            option: dict, optional
                {
                    "x" : "x_name",
                    "y" : ["y1", "y2"],
                    "ylim" : (None,10),
                    "ylabel" : "Y",
                    "linewidth" : [1,1.5]
                }
            kwargs: parameters corresponding to items of option.
            """
            # Merge priority (low -> high): defaults, preset dict, preset
            # kwargs, per-call option dict, per-call kwargs. to_flatlist
            # expands tuple-valued settings into one entry per plot.
            list_of_entry = to_flatlist(
                {"data":data_source,**default_kwargs, **setting, **setting_kwargs, **option, **option_kwargs})
            # print(list_of_entry)
            arg_and_kwarg=generate_arg_and_kwags()(
                #as_DataFrame(data_source),
                #data_source,
                list(map(arg_filter, list_of_entry)),
                list(map(kwarg_filter, list_of_entry))
            )
            # return plot action: fold every entry's plotter over the Axes.
            return lambda ax: it.reducing(
                lambda acc, e: plotter(*e[0], **e[1])(acc))(ax)(arg_and_kwarg)
        return set_data
    return presetting
def as_DataFrame(d: DataSource) -> pd.DataFrame:
if type(d) in [pd.DataFrame, pd.Series]:
return d
elif type(d) in [list, dict]:
return pd.DataFrame(d)
else:
raise TypeError(f"{type(d)} is not available for data source.")
def generate_arg_and_kwags():
"""
Setup positional arguments and keyword arguments for plotter.
"""
def gen_func(
#df: DataSource,
option: List[list],
style: List[dict]
)->List[Tuple[list, dict]]:
if len(option) != len(style):
raise SystemError("option and style must be same size list.")
arg_and_kwarg = []
for o, s in zip(option, style):
arg = [*o]
kwargs = s
arg_and_kwarg.append((arg, kwargs))
return arg_and_kwarg
return gen_func
def get_subset(use_index=True)\
->Callable[[DataSource,Selector], DataSource]:
"""
"""
def f(df: DataSource, k:Selector)->DataSource:
"""
Select value in hashable (pandas.DataFrame, dict, etc.)
"""
if type(df) is pd.DataFrame:
if k in ["index", None]:
return df.index
elif type(k) is str:
return df[k]
elif callable(k):
return k(df)
else:
return df[k]
elif type(df) is pd.Series:
if k in ["index", None]:
return df.index
elif callable(k):
return k(df)
else:
return df
elif type(df) is dict:
if type(k) is str:
return df.get(k,[])
elif callable(k):
return k(df)
else:
return df
else:
#print(df)
raise TypeError("df must be pandas.DataFrame or pandas.Series.")
return f
def get_literal_or_series(input:LiteralOrSequencer, df: DataSource)->LiteralOrSequence:
if callable(input):
return input(df)
else:
return input
def get_value(default=""):
def | (_, k, v):
"""
Return value.
"""
return v if v is not None else default
return f
def is_iterable(o):
return type(o) in [list, tuple]
def to_flatlist(d: dict) -> List[dict]:
"""
Usage
-----
d = {
"x" : (0,1,2),
"y" : [1,2],
"z" : 0
}
to_flatlist(d) is...
[
{"x" : 0, "y" : [1,2], "z" : 0},
{"x" : 1, "y" : [1,2], "z" : 0},
{"x" : 2, "y" : [1,2], "z" : 0}
]
"""
def value_to_list(d: dict) -> dict:
return dict(it.mapping(
lambda kv: (kv[0], kv[1]) if type(
kv[1]) is tuple else (kv[0], [kv[1]])
)(d.items()))
list_dict = value_to_list(d)
max_length = it.reducing(
lambda acc, e: acc if acc >= len(e) else len(e)
)(0)(list_dict.values())
flatlist = []
for i in range(max_length):
new_dict = {}
for k in list_dict.keys():
if len(list_dict[k]) >= i+1:
new_dict.update({(k): list_dict[k][i]})
else:
new_dict.update({(k): list_dict[k][-1]})
flatlist.append(new_dict)
return flatlist
def filter_dict(k: list) -> Callable[[dict], dict]:
return lambda d: dict(
filter(lambda kv: (kv[0] in k) and kv[1] is not None, d.items())
)
def translate_table(table_dict: dict):
return lambda d: {table_dict.get(k, k): v for k, v in d.items()}
def get_values_by_keys(k: list, default=None)->Callable[[dict], list]:
"""
Filter dictionary by list of keys.
Parameters
----------
k: list
default: any, optional
Set as default value for key not in dict.
Default value is None
"""
return lambda d: list(map(lambda key: d.get(key, default), k))
def Iget_factor(
df: pd.DataFrame,
f: Union[str, Callable[[pd.DataFrame], pd.Series]],
factor: Optional[Union[list, Callable[[pd.DataFrame], pd.Series]]]
)->Tuple[pd.Series, list]:
d = f(df) if callable(f) else df[f]
if type(factor) is list:
return (d, factor)
elif callable(factor):
return factor(d)
else:
return (d, d.astype('category').cat.categories)
def selector_or_literal(df, s):
if s is None:
return df.index
elif callable(s):
return s(df)
elif type(s) is list:
return s
elif type(s) in [int, float]:
return [s]
elif s in df:
return df[s]
else:
return df.index
def Icoordinate_transform(ax, xcoordinate: Optional[str], ycoordinate: Optional[str]):
    """
    Select coordinate transform method for x and y axis.

    Parameters
    ----------
    ax: matplotlib Axes supplying the transforms.
    xcoordinate, ycoordinate: "axes" selects axes-fraction coordinates
        (ax.transAxes); any other value (including None) selects data
        coordinates (ax.transData).

    Returns
    -------
    A blended transform combining the selected x and y transforms.
    """
    # Use == for string comparison: `is "axes"` compared object identity,
    # which is implementation-dependent for string literals and emits a
    # SyntaxWarning on CPython >= 3.8.
    return matplotlib.transforms.blended_transform_factory(
        ax.transAxes if xcoordinate == "axes" else ax.transData,
        ax.transAxes if ycoordinate == "axes" else ax.transData
    )
default_kwargs = {}
_tick_params_each = {
"labelsize": 12,
"rotation": 0,
"which": "both",
"direction": "in",
"color": "black",
"labelcolor": "black"
}
_tick_params_kwargs = {
**_tick_params_each,
"labelbottom": None,
"labelleft": None,
"labeltop": None,
"labelright": None,
"bottom": None,
"left": None,
"top": None,
"right": None
}
_label_kwargs = {
"alpha": 1,
"color": "black",
"family": ["Noto Sans CJK JP", "sans-serif"],
# "fontname" : "sans-serif",
"fontsize": 16,
"fontstyle": "normal",
}
_line2d_kwargs = {
"alpha": 1,
"marker": "",
"markeredgecolor": None,
"markeredgewidth": None,
"markerfacecolor": None,
"markerfacecoloralt": None,
"markersize": None,
"linestyle": None,
"linewidth": None,
"color": None,
}
_grid_kwargs:dict = {
"axis": None,
**_line2d_kwargs,
"color": 'gray',
"linestyle": ':',
"linewidth": 1,
}
_line_kwargs = {
**_line2d_kwargs,
"linestyle": "-",
"linewidth": 1,
}
_vhlines_kwargs = {
"color": None,
"linestyle": "-",
"linewidth": 1,
"alpha": 1
}
_scatter_kwargs = {
"c": None,
"s": None,
"cmap": None,
"norm": None,
"vmin": None,
"vmax": None,
"alpha": 1,
"marker": "o",
"edgecolors": "face",
"linewidth": None,
"linestyle": "-"
}
_fill_kwargs = {
"color": "green",
"alpha": 0.5,
"hatch": None
}
_quiver_kwargs = {
"scale": 1,
"scale_units": "dots",
"alpha": 1,
"color": "black",
"width": 1,
"headwidth": 0.1,
"headlength": 0.2
}
_axline_kwargs = {
**_line2d_kwargs,
"alpha": 0.5,
"color": "green",
"linewidth": None,
"linestyle": "-"
}
_box_kwargs = {
"vert": True,
"notch": False,
"sym": None, # Symbol setting for out lier
"whis": 1.5,
"bootstrap": None,
"usermedians": None,
"conf_intervals": None,
"widths": 0.5,
"patch_artist": False,
"manage_xticks": True,
"autorange": False,
"meanline": False,
"zorder": None,
"showcaps": True,
"showbox": True,
"showfliers": True,
"showmeans": False,
"capprops": None,
"boxprops": None,
"whiskerprops": None,
"flierprops": None,
"medianprops": None,
"meanprops": None
}
_violin_kwargs = {
"vert": True,
"widths": 0.5,
"showmeans": False,
"showextrema": True,
"showmedians": False,
"points": 100,
"bw_method": None,
"scale": "width", # "width" | "count"
"bodies": None,
"cmeans": None
}
"""
https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.violin.html
bodies:{
"facecolor" : "#2196f3",
"edgecolor" : "#005588",
"alpha" : 0.5
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.PolyCollection
cmeans:{
"edgecolor",
"linestyle",
"linewidth",
"alpha"
}
https://matplotlib.org/api/collections_api.html#matplotlib.collections.LineCollection
"""
_text_kwargs = {
"ha": 'left',
"va": 'bottom',
"color": "black",
"family": None,
"fontsize": 10,
"rotation": None,
"style": None,
"xcoordinate": None, # "data" = None | "axes"
"ycoordinate": None, # "data" = None | "axes"
"wrap": False
}
_hist_kwargs = {
"bins": None,
"range": None,
"density": None,
"weights": None,
"cumulative": False,
"bottom": None,
"histtype": 'bar',
"align": 'mid',
"orientation": 'vertical',
"rwidth": None,
"log": False,
"color": "#2196f3",
"label": None,
"stacked": False,
"normed": None,
}
_bar_kwargs = {
"norm": False,
"vert": True,
# "width": 0.8,
"align": "center",
}
default_kwargs.update({
"tick_params_each": _tick_params_each,
"tick_params": _tick_params_kwargs,
"axis_label": _label_kwargs,
"grid": _grid_kwargs,
"line": _line_kwargs,
"vlines": _vhlines_kwargs,
"hlines": _vhlines_kwargs,
"scatter": _scatter_kwargs,
"fill": _fill_kwargs,
"quiver": _quiver_kwargs,
"axline": _axline_kwargs,
"box": _box_kwargs,
"violin": _violin_kwargs,
"text": _text_kwargs,
"hist": _hist_kwargs,
"bar": _bar_kwargs,
})
def _annotate_plotter(df, from_pos, to_pos, text, *arg, textdict={}, **kwargs) -> AxPlot:
def plot(ax):
return ax
return plot
def annotate(**presetting):
return plot_action(
_annotate_plotter,
["from_pos", "to_pos", "text"],
{**_quiver_kwargs, "textdict": _text_kwargs}
)(**presetting)
| f | identifier_name |
transformer.go | package transformer
import (
"sort"
"strings"
"github.com/bblfsh/sdk/v3/uast"
"github.com/bblfsh/sdk/v3/uast/nodes"
)
const optimizeCheck = true
// Transformer is an interface for transformations that operates on AST trees.
// An implementation is responsible for walking the tree and executing transformation on each AST node.
type Transformer interface {
Do(root nodes.Node) (nodes.Node, error)
}
// CodeTransformer is a special case of Transformer that needs an original source code to operate.
type CodeTransformer interface {
OnCode(code string) Transformer
}
// Sel is an operation that can verify if a specific node matches a set of constraints or not.
type Sel interface {
// Kinds returns a mask of all nodes kinds that this operation might match.
Kinds() nodes.Kind
// Check will verify constraints for a single node and returns true if an objects matches them.
// It can also populate the State with variables that can be used later to Construct a different object from the State.
Check(st *State, n nodes.Node) (bool, error)
}
// Mod is an operation that can reconstruct an AST node from a given State.
type Mod interface {
// Construct will use variables stored in State to reconstruct an AST node.
// Node that is provided as an argument may be used as a base for reconstruction.
Construct(st *State, n nodes.Node) (nodes.Node, error)
}
// Op is a generic AST transformation step that describes a shape of an AST tree.
// It can be used to either check the constraints for a specific node and populate state, or to reconstruct an AST shape
// from a the same state (probably produced by another Op).
type Op interface {
Sel
Mod
}
// Transformers flattens the provided transformer slices into a single
// slice, preserving order. An empty input yields a nil slice.
func Transformers(arr ...[]Transformer) []Transformer {
	var out []Transformer
	for _, group := range arr {
		for _, t := range group {
			out = append(out, t)
		}
	}
	return out
}
var _ Transformer = (TransformFunc)(nil)
// TransformFunc is a function that will be applied to each AST node to transform the tree.
// It returns a new AST and true if tree was changed, or an old node and false if no modifications were done.
// The the tree will be traversed automatically and the callback will be called for each node.
type TransformFunc func(n nodes.Node) (nodes.Node, bool, error)
// Do runs a transformation function for each AST node.
//
// On a callback error the node is kept as-is, the walk continues, and only
// the last error encountered is returned alongside the (possibly
// partially) rewritten tree.
func (f TransformFunc) Do(n nodes.Node) (nodes.Node, error) {
	var last error
	nn, ok := nodes.Apply(n, func(n nodes.Node) (nodes.Node, bool) {
		nn, ok, err := f(n)
		if err != nil {
			// Remember the error but keep walking the rest of the tree.
			last = err
			return n, false
		} else if !ok {
			return n, false
		}
		return nn, ok
	})
	if ok {
		// At least one node changed — return the new tree.
		return nn, last
	}
	return n, last
}
var _ Transformer = (TransformObjFunc)(nil)
// TransformObjFunc is like TransformFunc, but only matches Object nodes.
type TransformObjFunc func(n nodes.Object) (nodes.Object, bool, error)
// Func converts this TransformObjFunc to a regular TransformFunc by skipping all non-object nodes.
func (f TransformObjFunc) Func() TransformFunc {
	return func(n nodes.Node) (nodes.Node, bool, error) {
		obj, isObj := n.(nodes.Object)
		if !isObj {
			// Not an object — leave the node untouched.
			return n, false, nil
		}
		out, changed, err := f(obj)
		if err != nil {
			return n, false, err
		}
		if !changed {
			return n, false, nil
		}
		return out, changed, nil
	}
}
// Do runs a transformation function for each AST node.
// Non-object nodes pass through unchanged (see Func).
func (f TransformObjFunc) Do(n nodes.Node) (nodes.Node, error) {
	return f.Func().Do(n)
}
// Map creates a two-way mapping between two transform operations.
// The first operation will be used to check constraints for each node and store state, while the second one will use
// the state to construct a new tree.
func Map(src, dst Op) Mapping |
// MapObj creates a two-way mapping between two object operations: src
// checks constraints and stores state, dst uses that state to construct
// the replacement object.
func MapObj(src, dst ObjectOp) ObjMapping {
	return objMapping{src: src, dst: dst}
}
// MapPart wraps both sides of an object mapping with Part(vr, ...) when
// they declare fixed field sets; mappings that are already partial on both
// sides are returned unchanged. Mixing a partial side with a non-partial
// one is a programming error and panics.
func MapPart(vr string, m ObjMapping) ObjMapping {
	src, dst := m.ObjMapping()
	_, sok := src.Fields()
	_, dok := dst.Fields()
	if !sok && !dok {
		// both contain partial op, ignore current label
		return MapObj(src, dst)
	} else if sok != dok {
		// One side partial, the other not — fields would silently be
		// dropped or invented, so fail loudly.
		panic("inconsistent use of Part")
	}
	return MapObj(Part(vr, src), Part(vr, dst))
}
// Identity returns a mapping that uses the same operation for both the
// check and the construct step, leaving matched nodes unchanged.
func Identity(op Op) Mapping {
	return Map(op, op)
}
type Mapping interface {
Mapping() (src, dst Op)
}
type ObjMapping interface {
Mapping
ObjMapping() (src, dst ObjectOp)
}
type MappingOp interface {
Op
Mapping
}
type mapping struct {
src, dst Op
}
func (m mapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
type objMapping struct {
src, dst ObjectOp
}
func (m objMapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
func (m objMapping) ObjMapping() (src, dst ObjectOp) {
return m.src, m.dst
}
// Reverse changes a transformation direction, allowing to construct the source tree.
func Reverse(m Mapping) Mapping {
	from, to := m.Mapping()
	return Map(to, from)
}
// apply runs the mapping over every node of root: nodes matched by src are
// replaced with the node built by dst.Construct. Check/construct errors
// are collected (the offending node is left unchanged) and returned
// combined as a multi-error.
func (m mapping) apply(root nodes.Node) (nodes.Node, error) {
	src, dst := m.src, m.dst
	var errs []error
	// If src can only match objects (or only arrays), skip every other
	// node kind before running the full check.
	_, objOp := src.(ObjectOp)
	_, arrOp := src.(ArrayOp)
	st := NewState()
	nn, ok := nodes.Apply(root, func(n nodes.Node) (nodes.Node, bool) {
		if n != nil {
			if objOp {
				if _, ok := n.(nodes.Object); !ok {
					return n, false
				}
			} else if arrOp {
				if _, ok := n.(nodes.Array); !ok {
					return n, false
				}
			}
		}
		// One State is reused across all nodes; reset per attempt.
		st.Reset()
		if ok, err := src.Check(st, n); err != nil {
			errs = append(errs, errCheck.Wrap(err))
			return n, false
		} else if !ok {
			return n, false
		}
		nn, err := dst.Construct(st, nil)
		if err != nil {
			errs = append(errs, errConstruct.Wrap(err))
			return n, false
		}
		return nn, true
	})
	err := NewMultiError(errs...)
	if ok {
		return nn, err
	}
	return root, err
}
// Mappings takes multiple mappings and optimizes the process of applying them as a single transformation.
// When optimizeCheck is enabled, the mappings are indexed by node kind and
// by object type so that only relevant mappings are tried for each node.
func Mappings(maps ...Mapping) Transformer {
	if len(maps) == 0 {
		return mappings{}
	}
	mp := mappings{
		all: maps,
	}
	if optimizeCheck {
		mp.byKind = make(map[nodes.Kind][]Mapping)
		mp.index()
	}
	return mp
}
type mappings struct {
all []Mapping
// indexed mappings
byKind map[nodes.Kind][]Mapping // mappings applied to specific node kind
typedObj map[string][]Mapping // mappings for objects with specific type
typedAny []Mapping // mappings for any typed object (operations that does not mention the type)
}
// index pre-compiles every mapping and builds the lookup tables: byKind
// (mappings per node kind), typedObj (mappings per fixed uast type value)
// and typedAny (object mappings without a fixed type, plus non-object ops).
func (m *mappings) index() {
	precompile := func(m Mapping) Mapping {
		return Map(m.Mapping())
	}
	// ordered carries the original registration index so per-type lists
	// can be restored to declaration order after merging with typedAny.
	type ordered struct {
		ind int
		mp  Mapping
	}
	var typedAny []ordered
	typed := make(map[string][]ordered)
	for i, mp := range m.all {
		// pre-compile object operations (sort fields for unordered ops, etc)
		mp = precompile(mp)
		oop, _ := mp.Mapping()
		if chk, ok := oop.(*opCheck); ok {
			// Unwrap the check decorator to inspect the underlying op.
			oop = chk.op
		}
		// switch by operation type and make a separate list
		// next time we will see a node with matching type, we will apply only specific ops
		for _, k := range oop.Kinds().Split() {
			m.byKind[k] = append(m.byKind[k], mp)
		}
		switch op := oop.(type) {
		case ObjectOp:
			specific := false
			fields, _ := op.Fields()
			// Only a required, fixed, string-valued uast.KeyType field
			// makes the mapping type-specific.
			if f, ok := fields.Get(uast.KeyType); ok && !f.Optional {
				if f.Fixed != nil {
					typ := *f.Fixed
					if typ, ok := typ.(nodes.String); ok {
						s := string(typ)
						typed[s] = append(typed[s], ordered{ind: i, mp: mp})
						specific = true
					}
				}
			}
			if !specific {
				typedAny = append(typedAny, ordered{ind: i, mp: mp})
			}
		default:
			// the type is unknown, thus we should try to apply it to objects and array as well
			typedAny = append(typedAny, ordered{ind: i, mp: mp})
		}
	}
	m.typedObj = make(map[string][]Mapping, len(typed))
	for typ, ord := range typed {
		// Each type-specific list also includes every type-agnostic
		// mapping, replayed in original registration order.
		ord = append(ord, typedAny...)
		sort.Slice(ord, func(i, j int) bool {
			return ord[i].ind < ord[j].ind
		})
		maps := make([]Mapping, 0, len(ord))
		for _, o := range ord {
			maps = append(maps, o.mp)
		}
		m.typedObj[typ] = maps
	}
}
// Do applies all mappings to every node of root in a single tree walk.
// With indexes built (optimizeCheck), only the mappings relevant to each
// node's kind/type are attempted. Errors are collected; a failing mapping
// leaves the node at its last successfully constructed value.
func (m mappings) Do(root nodes.Node) (nodes.Node, error) {
	var errs []error
	st := NewState()
	nn, ok := nodes.Apply(root, func(old nodes.Node) (nodes.Node, bool) {
		// Pick the candidate mapping list for this node.
		var maps []Mapping
		if !optimizeCheck {
			maps = m.all
		} else {
			maps = m.byKind[nodes.KindOf(old)]
			switch old := old.(type) {
			case nodes.Object:
				// A typed object narrows the list further, if indexed.
				if typ, ok := old[uast.KeyType].(nodes.String); ok {
					if mp, ok := m.typedObj[string(typ)]; ok {
						maps = mp
					}
				}
			}
		}
		n := old
		applied := false
		// Mappings chain: each successful mapping rewrites n and the next
		// one sees the rewritten node.
		for _, mp := range maps {
			src, dst := mp.Mapping()
			st.Reset()
			if ok, err := src.Check(st, n); err != nil {
				errs = append(errs, errCheck.Wrap(err))
				continue
			} else if !ok {
				continue
			}
			applied = true
			nn, err := dst.Construct(st, nil)
			if err != nil {
				errs = append(errs, errConstruct.Wrap(err))
				continue
			}
			n = nn
		}
		if !applied {
			return old, false
		}
		return n, true
	})
	err := NewMultiError(errs...)
	if err == nil {
		// Surface unused-variable problems from the final state.
		err = st.Validate()
	}
	if ok {
		return nn, err
	}
	return root, err
}
// NewState creates a new state for Ops to work on.
// It stores variables, flags and anything that necessary
// for transformation steps to persist data.
func NewState() *State {
return &State{}
}
// Vars is a set of variables with their values.
type Vars map[string]nodes.Node
// State stores all variables (placeholder values, flags and wny other state) between Check and Construct steps.
type State struct {
vars Vars
unused map[string]struct{}
states map[string][]*State
}
// Reset clears the state and allows to reuse an object.
func (st *State) Reset() {
st.vars = nil
st.unused = nil
st.states = nil
}
// Validate should be called after a successful transformation to check if there are any errors related to unused state.
func (st *State) Validate() error {
if len(st.unused) == 0 {
return nil
}
names := make([]string, 0, len(st.unused))
for name := range st.unused {
names = append(names, name)
}
sort.Strings(names)
return ErrVariableUnused.New(names)
}
// Clone will return a copy of the State. This can be used to apply Check and throw away any variables produced by it.
// To merge a cloned state back use ApplyFrom on a parent state.
func (st *State) Clone() *State {
st2 := NewState()
if len(st.vars) != 0 {
st2.vars = make(Vars)
st2.unused = make(map[string]struct{})
}
for k, v := range st.vars {
st2.vars[k] = v
}
for k := range st.unused {
st2.unused[k] = struct{}{}
}
if len(st.states) != 0 {
st2.states = make(map[string][]*State)
}
for k, v := range st.states {
st2.states[k] = v
}
return st2
}
// ApplyFrom merges a provided state into this state object.
func (st *State) ApplyFrom(st2 *State) {
if len(st2.vars) != 0 && st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
for k, v := range st2.vars {
if _, ok := st.vars[k]; !ok {
st.vars[k] = v
}
}
for k := range st2.unused {
st.unused[k] = struct{}{}
}
if len(st2.states) != 0 && st.states == nil {
st.states = make(map[string][]*State)
}
for k, v := range st2.states {
if _, ok := st.states[k]; !ok {
st.states[k] = v
}
}
}
// GetVar looks up a named variable.
func (st *State) GetVar(name string) (nodes.Node, bool) {
n, ok := st.vars[name]
if ok {
delete(st.unused, name)
}
return n, ok
}
// MustGetVar looks up a named variable and returns ErrVariableNotDefined in case it does not exists.
func (st *State) MustGetVar(name string) (nodes.Node, error) {
n, ok := st.GetVar(name)
if !ok {
return nil, ErrVariableNotDefined.New(name)
}
return n, nil
}
// VarsPtrs is a set of variable pointers.
type VarsPtrs map[string]nodes.NodePtr
// MustGetVars is like MustGetVar but fetches multiple variables in one operation.
func (st *State) MustGetVars(vars VarsPtrs) error {
for name, dst := range vars {
n, ok := st.GetVar(name)
if !ok {
return ErrVariableNotDefined.New(name)
}
if err := dst.SetNode(n); err != nil {
return err
}
}
return nil
}
// SetVar sets a named variable. It will return ErrVariableRedeclared if a variable with the same name is already set.
// It will ignore the operation if variable already exists and has the same value (nodes.Value).
func (st *State) SetVar(name string, val nodes.Node) error {
cur, ok := st.vars[name]
if !ok {
// not declared
if st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
st.vars[name] = val
st.unused[name] = struct{}{}
return nil
}
if nodes.Equal(cur, val) {
// already declared, and the same value is already in the map
return nil
}
return ErrVariableRedeclared.New(name, cur, val)
}
// SetVars is like SetVar but sets multiple variables in one operation.
func (st *State) SetVars(vars Vars) error {
for k, v := range vars {
if err := st.SetVar(k, v); err != nil {
return err
}
}
return nil
}
// GetStateVar returns a stored sub-state from a named variable.
func (st *State) GetStateVar(name string) ([]*State, bool) {
n, ok := st.states[name]
return n, ok
}
// SetStateVar sets a sub-state variable. It returns ErrVariableRedeclared if the variable with this name already exists.
func (st *State) SetStateVar(name string, sub []*State) error {
cur, ok := st.states[name]
if ok {
return ErrVariableRedeclared.New(name, cur, sub)
}
if st.states == nil {
st.states = make(map[string][]*State)
}
st.states[name] = sub
return nil
}
// DefaultNamespace is a transform that sets a specified namespace for predicates and values that doesn't have a namespace.
func DefaultNamespace(ns string) Transformer {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
tp, ok := obj[uast.KeyType].(nodes.String)
if !ok {
return n, false, nil
}
if strings.Contains(string(tp), ":") {
return n, false, nil
}
obj = obj.CloneObject()
obj[uast.KeyType] = nodes.String(ns + ":" + string(tp))
return obj, true, nil
})
}
| {
return mapping{src: src, dst: dst}
} | identifier_body |
transformer.go | package transformer
import (
"sort"
"strings"
"github.com/bblfsh/sdk/v3/uast"
"github.com/bblfsh/sdk/v3/uast/nodes"
)
const optimizeCheck = true
// Transformer is an interface for transformations that operates on AST trees.
// An implementation is responsible for walking the tree and executing transformation on each AST node.
type Transformer interface {
Do(root nodes.Node) (nodes.Node, error)
}
// CodeTransformer is a special case of Transformer that needs an original source code to operate.
type CodeTransformer interface {
OnCode(code string) Transformer
}
// Sel is an operation that can verify if a specific node matches a set of constraints or not.
type Sel interface {
// Kinds returns a mask of all nodes kinds that this operation might match.
Kinds() nodes.Kind
// Check will verify constraints for a single node and returns true if an objects matches them.
// It can also populate the State with variables that can be used later to Construct a different object from the State.
Check(st *State, n nodes.Node) (bool, error)
}
// Mod is an operation that can reconstruct an AST node from a given State.
type Mod interface {
// Construct will use variables stored in State to reconstruct an AST node.
// Node that is provided as an argument may be used as a base for reconstruction.
Construct(st *State, n nodes.Node) (nodes.Node, error)
}
// Op is a generic AST transformation step that describes a shape of an AST tree.
// It can be used to either check the constraints for a specific node and populate state, or to reconstruct an AST shape
// from a the same state (probably produced by another Op).
type Op interface {
Sel
Mod
}
// Transformers appends all provided transformer slices into single one.
func Transformers(arr ...[]Transformer) []Transformer {
var out []Transformer
for _, a := range arr {
out = append(out, a...)
}
return out
}
var _ Transformer = (TransformFunc)(nil)
// TransformFunc is a function that will be applied to each AST node to transform the tree.
// It returns a new AST and true if tree was changed, or an old node and false if no modifications were done.
// The the tree will be traversed automatically and the callback will be called for each node.
type TransformFunc func(n nodes.Node) (nodes.Node, bool, error)
// Do runs a transformation function for each AST node.
func (f TransformFunc) Do(n nodes.Node) (nodes.Node, error) {
var last error
nn, ok := nodes.Apply(n, func(n nodes.Node) (nodes.Node, bool) {
nn, ok, err := f(n)
if err != nil {
last = err
return n, false
} else if !ok {
return n, false
}
return nn, ok
})
if ok {
return nn, last
}
return n, last
}
var _ Transformer = (TransformObjFunc)(nil)
// TransformObjFunc is like TransformFunc, but only matches Object nodes.
type TransformObjFunc func(n nodes.Object) (nodes.Object, bool, error)
// Func converts this TransformObjFunc to a regular TransformFunc by skipping all non-object nodes.
func (f TransformObjFunc) Func() TransformFunc {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
nn, ok, err := f(obj)
if err != nil {
return n, false, err
} else if !ok {
return n, false, nil
}
return nn, ok, nil
})
}
// Do runs a transformation function for each AST node.
func (f TransformObjFunc) Do(n nodes.Node) (nodes.Node, error) {
return f.Func().Do(n)
}
// Map creates a two-way mapping between two transform operations.
// The first operation will be used to check constraints for each node and store state, while the second one will use
// the state to construct a new tree.
func Map(src, dst Op) Mapping {
return mapping{src: src, dst: dst}
}
func MapObj(src, dst ObjectOp) ObjMapping {
return objMapping{src: src, dst: dst}
}
func MapPart(vr string, m ObjMapping) ObjMapping {
src, dst := m.ObjMapping()
_, sok := src.Fields()
_, dok := dst.Fields()
if !sok && !dok {
// both contain partial op, ignore current label
return MapObj(src, dst)
} else if sok != dok {
panic("inconsistent use of Part")
}
return MapObj(Part(vr, src), Part(vr, dst))
}
func Identity(op Op) Mapping {
return Map(op, op)
}
type Mapping interface {
Mapping() (src, dst Op)
}
type ObjMapping interface {
Mapping
ObjMapping() (src, dst ObjectOp)
}
type MappingOp interface {
Op
Mapping
}
type mapping struct {
src, dst Op
}
func (m mapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
type objMapping struct {
src, dst ObjectOp
}
func (m objMapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
func (m objMapping) ObjMapping() (src, dst ObjectOp) {
return m.src, m.dst
}
// Reverse changes a transformation direction, allowing to construct the source tree.
func Reverse(m Mapping) Mapping {
src, dst := m.Mapping()
return Map(dst, src)
}
func (m mapping) apply(root nodes.Node) (nodes.Node, error) {
src, dst := m.src, m.dst
var errs []error
_, objOp := src.(ObjectOp)
_, arrOp := src.(ArrayOp)
st := NewState()
nn, ok := nodes.Apply(root, func(n nodes.Node) (nodes.Node, bool) {
if n != nil {
if objOp {
if _, ok := n.(nodes.Object); !ok {
return n, false
}
} else if arrOp {
if _, ok := n.(nodes.Array); !ok {
return n, false
}
}
}
st.Reset()
if ok, err := src.Check(st, n); err != nil {
errs = append(errs, errCheck.Wrap(err))
return n, false
} else if !ok {
return n, false
}
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
return n, false
}
return nn, true
})
err := NewMultiError(errs...)
if ok {
return nn, err
}
return root, err
}
// Mappings takes multiple mappings and optimizes the process of applying them as a single transformation.
func Mappings(maps ...Mapping) Transformer {
if len(maps) == 0 {
return mappings{}
}
mp := mappings{
all: maps,
}
if optimizeCheck {
mp.byKind = make(map[nodes.Kind][]Mapping)
mp.index()
}
return mp
}
type mappings struct {
all []Mapping
// indexed mappings
byKind map[nodes.Kind][]Mapping // mappings applied to specific node kind
typedObj map[string][]Mapping // mappings for objects with specific type
typedAny []Mapping // mappings for any typed object (operations that does not mention the type)
}
func (m *mappings) index() {
precompile := func(m Mapping) Mapping {
return Map(m.Mapping())
}
type ordered struct {
ind int
mp Mapping
}
var typedAny []ordered
typed := make(map[string][]ordered)
for i, mp := range m.all {
// pre-compile object operations (sort fields for unordered ops, etc)
mp = precompile(mp)
oop, _ := mp.Mapping()
if chk, ok := oop.(*opCheck); ok {
oop = chk.op
}
// switch by operation type and make a separate list
// next time we will see a node with matching type, we will apply only specific ops
for _, k := range oop.Kinds().Split() {
m.byKind[k] = append(m.byKind[k], mp)
}
switch op := oop.(type) {
case ObjectOp:
specific := false
fields, _ := op.Fields()
if f, ok := fields.Get(uast.KeyType); ok && !f.Optional {
if f.Fixed != nil {
typ := *f.Fixed
if typ, ok := typ.(nodes.String); ok {
s := string(typ)
typed[s] = append(typed[s], ordered{ind: i, mp: mp})
specific = true
}
}
}
if !specific {
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
default:
// the type is unknown, thus we should try to apply it to objects and array as well
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
}
m.typedObj = make(map[string][]Mapping, len(typed))
for typ, ord := range typed {
ord = append(ord, typedAny...)
sort.Slice(ord, func(i, j int) bool {
return ord[i].ind < ord[j].ind
})
maps := make([]Mapping, 0, len(ord))
for _, o := range ord {
maps = append(maps, o.mp)
}
m.typedObj[typ] = maps
}
}
func (m mappings) Do(root nodes.Node) (nodes.Node, error) {
var errs []error
st := NewState()
nn, ok := nodes.Apply(root, func(old nodes.Node) (nodes.Node, bool) {
var maps []Mapping
if !optimizeCheck {
maps = m.all
} else {
maps = m.byKind[nodes.KindOf(old)]
switch old := old.(type) {
case nodes.Object:
if typ, ok := old[uast.KeyType].(nodes.String); ok {
if mp, ok := m.typedObj[string(typ)]; ok {
maps = mp
}
}
}
}
n := old
applied := false
for _, mp := range maps {
src, dst := mp.Mapping()
st.Reset()
if ok, err := src.Check(st, n); err != nil {
errs = append(errs, errCheck.Wrap(err))
continue
} else if !ok {
continue
}
applied = true
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
continue
}
n = nn
}
if !applied {
return old, false
}
return n, true
})
err := NewMultiError(errs...)
if err == nil {
err = st.Validate()
}
if ok {
return nn, err
}
return root, err
}
// NewState creates a new state for Ops to work on.
// It stores variables, flags and anything that necessary
// for transformation steps to persist data.
func NewState() *State {
return &State{}
}
// Vars is a set of variables with their values.
type Vars map[string]nodes.Node
// State stores all variables (placeholder values, flags and wny other state) between Check and Construct steps.
type State struct {
vars Vars
unused map[string]struct{}
states map[string][]*State
}
// Reset clears the state and allows to reuse an object.
func (st *State) Reset() {
st.vars = nil
st.unused = nil
st.states = nil
}
// Validate should be called after a successful transformation to check if there are any errors related to unused state.
func (st *State) Validate() error {
if len(st.unused) == 0 {
return nil
}
names := make([]string, 0, len(st.unused))
for name := range st.unused {
names = append(names, name)
}
sort.Strings(names)
return ErrVariableUnused.New(names)
}
// Clone will return a copy of the State. This can be used to apply Check and throw away any variables produced by it.
// To merge a cloned state back use ApplyFrom on a parent state.
func (st *State) Clone() *State {
st2 := NewState()
if len(st.vars) != 0 {
st2.vars = make(Vars)
st2.unused = make(map[string]struct{})
}
for k, v := range st.vars {
st2.vars[k] = v
}
for k := range st.unused {
st2.unused[k] = struct{}{}
}
if len(st.states) != 0 {
st2.states = make(map[string][]*State)
}
for k, v := range st.states {
st2.states[k] = v
}
return st2
}
// ApplyFrom merges a provided state into this state object.
func (st *State) ApplyFrom(st2 *State) {
if len(st2.vars) != 0 && st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
for k, v := range st2.vars {
if _, ok := st.vars[k]; !ok {
st.vars[k] = v
}
}
for k := range st2.unused {
st.unused[k] = struct{}{}
}
if len(st2.states) != 0 && st.states == nil {
st.states = make(map[string][]*State)
}
for k, v := range st2.states {
if _, ok := st.states[k]; !ok {
st.states[k] = v
}
}
}
// GetVar looks up a named variable.
func (st *State) GetVar(name string) (nodes.Node, bool) {
n, ok := st.vars[name]
if ok {
delete(st.unused, name)
}
return n, ok
}
// MustGetVar looks up a named variable and returns ErrVariableNotDefined in case it does not exists.
func (st *State) MustGetVar(name string) (nodes.Node, error) {
n, ok := st.GetVar(name)
if !ok {
return nil, ErrVariableNotDefined.New(name)
}
return n, nil
}
// VarsPtrs is a set of variable pointers.
type VarsPtrs map[string]nodes.NodePtr
// MustGetVars is like MustGetVar but fetches multiple variables in one operation.
func (st *State) MustGetVars(vars VarsPtrs) error {
for name, dst := range vars {
n, ok := st.GetVar(name)
if !ok {
return ErrVariableNotDefined.New(name)
}
if err := dst.SetNode(n); err != nil {
return err
}
}
return nil
}
// SetVar sets a named variable. It will return ErrVariableRedeclared if a variable with the same name is already set.
// It will ignore the operation if variable already exists and has the same value (nodes.Value).
func (st *State) SetVar(name string, val nodes.Node) error {
cur, ok := st.vars[name]
if !ok {
// not declared
if st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
st.vars[name] = val
st.unused[name] = struct{}{}
return nil
}
if nodes.Equal(cur, val) {
// already declared, and the same value is already in the map
return nil
}
return ErrVariableRedeclared.New(name, cur, val)
}
// SetVars is like SetVar but sets multiple variables in one operation.
func (st *State) SetVars(vars Vars) error {
for k, v := range vars {
if err := st.SetVar(k, v); err != nil {
return err
}
}
return nil
} | }
// SetStateVar sets a sub-state variable. It returns ErrVariableRedeclared if the variable with this name already exists.
func (st *State) SetStateVar(name string, sub []*State) error {
cur, ok := st.states[name]
if ok {
return ErrVariableRedeclared.New(name, cur, sub)
}
if st.states == nil {
st.states = make(map[string][]*State)
}
st.states[name] = sub
return nil
}
// DefaultNamespace is a transform that sets a specified namespace for predicates and values that doesn't have a namespace.
func DefaultNamespace(ns string) Transformer {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
tp, ok := obj[uast.KeyType].(nodes.String)
if !ok {
return n, false, nil
}
if strings.Contains(string(tp), ":") {
return n, false, nil
}
obj = obj.CloneObject()
obj[uast.KeyType] = nodes.String(ns + ":" + string(tp))
return obj, true, nil
})
} |
// GetStateVar returns a stored sub-state from a named variable.
func (st *State) GetStateVar(name string) ([]*State, bool) {
n, ok := st.states[name]
return n, ok | random_line_split |
transformer.go | package transformer
import (
"sort"
"strings"
"github.com/bblfsh/sdk/v3/uast"
"github.com/bblfsh/sdk/v3/uast/nodes"
)
const optimizeCheck = true
// Transformer is an interface for transformations that operates on AST trees.
// An implementation is responsible for walking the tree and executing transformation on each AST node.
type Transformer interface {
Do(root nodes.Node) (nodes.Node, error)
}
// CodeTransformer is a special case of Transformer that needs an original source code to operate.
type CodeTransformer interface {
OnCode(code string) Transformer
}
// Sel is an operation that can verify if a specific node matches a set of constraints or not.
type Sel interface {
// Kinds returns a mask of all nodes kinds that this operation might match.
Kinds() nodes.Kind
// Check will verify constraints for a single node and returns true if an objects matches them.
// It can also populate the State with variables that can be used later to Construct a different object from the State.
Check(st *State, n nodes.Node) (bool, error)
}
// Mod is an operation that can reconstruct an AST node from a given State.
type Mod interface {
// Construct will use variables stored in State to reconstruct an AST node.
// Node that is provided as an argument may be used as a base for reconstruction.
Construct(st *State, n nodes.Node) (nodes.Node, error)
}
// Op is a generic AST transformation step that describes a shape of an AST tree.
// It can be used to either check the constraints for a specific node and populate state, or to reconstruct an AST shape
// from a the same state (probably produced by another Op).
type Op interface {
Sel
Mod
}
// Transformers appends all provided transformer slices into single one.
func Transformers(arr ...[]Transformer) []Transformer {
var out []Transformer
for _, a := range arr {
out = append(out, a...)
}
return out
}
var _ Transformer = (TransformFunc)(nil)
// TransformFunc is a function that will be applied to each AST node to transform the tree.
// It returns a new AST and true if tree was changed, or an old node and false if no modifications were done.
// The the tree will be traversed automatically and the callback will be called for each node.
type TransformFunc func(n nodes.Node) (nodes.Node, bool, error)
// Do runs a transformation function for each AST node.
func (f TransformFunc) Do(n nodes.Node) (nodes.Node, error) {
var last error
nn, ok := nodes.Apply(n, func(n nodes.Node) (nodes.Node, bool) {
nn, ok, err := f(n)
if err != nil {
last = err
return n, false
} else if !ok {
return n, false
}
return nn, ok
})
if ok {
return nn, last
}
return n, last
}
var _ Transformer = (TransformObjFunc)(nil)
// TransformObjFunc is like TransformFunc, but only matches Object nodes.
type TransformObjFunc func(n nodes.Object) (nodes.Object, bool, error)
// Func converts this TransformObjFunc to a regular TransformFunc by skipping all non-object nodes.
func (f TransformObjFunc) Func() TransformFunc {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
nn, ok, err := f(obj)
if err != nil {
return n, false, err
} else if !ok {
return n, false, nil
}
return nn, ok, nil
})
}
// Do runs a transformation function for each AST node.
func (f TransformObjFunc) Do(n nodes.Node) (nodes.Node, error) {
return f.Func().Do(n)
}
// Map creates a two-way mapping between two transform operations.
// The first operation will be used to check constraints for each node and store state, while the second one will use
// the state to construct a new tree.
func Map(src, dst Op) Mapping {
return mapping{src: src, dst: dst}
}
func MapObj(src, dst ObjectOp) ObjMapping {
return objMapping{src: src, dst: dst}
}
func MapPart(vr string, m ObjMapping) ObjMapping {
src, dst := m.ObjMapping()
_, sok := src.Fields()
_, dok := dst.Fields()
if !sok && !dok {
// both contain partial op, ignore current label
return MapObj(src, dst)
} else if sok != dok {
panic("inconsistent use of Part")
}
return MapObj(Part(vr, src), Part(vr, dst))
}
func Identity(op Op) Mapping {
return Map(op, op)
}
type Mapping interface {
Mapping() (src, dst Op)
}
type ObjMapping interface {
Mapping
ObjMapping() (src, dst ObjectOp)
}
type MappingOp interface {
Op
Mapping
}
type mapping struct {
src, dst Op
}
func (m mapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
type objMapping struct {
src, dst ObjectOp
}
func (m objMapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
func (m objMapping) ObjMapping() (src, dst ObjectOp) {
return m.src, m.dst
}
// Reverse changes a transformation direction, allowing to construct the source tree.
func Reverse(m Mapping) Mapping {
src, dst := m.Mapping()
return Map(dst, src)
}
func (m mapping) apply(root nodes.Node) (nodes.Node, error) {
src, dst := m.src, m.dst
var errs []error
_, objOp := src.(ObjectOp)
_, arrOp := src.(ArrayOp)
st := NewState()
nn, ok := nodes.Apply(root, func(n nodes.Node) (nodes.Node, bool) {
if n != nil {
if objOp {
if _, ok := n.(nodes.Object); !ok {
return n, false
}
} else if arrOp {
if _, ok := n.(nodes.Array); !ok {
return n, false
}
}
}
st.Reset()
if ok, err := src.Check(st, n); err != nil | else if !ok {
return n, false
}
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
return n, false
}
return nn, true
})
err := NewMultiError(errs...)
if ok {
return nn, err
}
return root, err
}
// Mappings takes multiple mappings and optimizes the process of applying them as a single transformation.
func Mappings(maps ...Mapping) Transformer {
if len(maps) == 0 {
return mappings{}
}
mp := mappings{
all: maps,
}
if optimizeCheck {
mp.byKind = make(map[nodes.Kind][]Mapping)
mp.index()
}
return mp
}
type mappings struct {
all []Mapping
// indexed mappings
byKind map[nodes.Kind][]Mapping // mappings applied to specific node kind
typedObj map[string][]Mapping // mappings for objects with specific type
typedAny []Mapping // mappings for any typed object (operations that does not mention the type)
}
func (m *mappings) index() {
precompile := func(m Mapping) Mapping {
return Map(m.Mapping())
}
type ordered struct {
ind int
mp Mapping
}
var typedAny []ordered
typed := make(map[string][]ordered)
for i, mp := range m.all {
// pre-compile object operations (sort fields for unordered ops, etc)
mp = precompile(mp)
oop, _ := mp.Mapping()
if chk, ok := oop.(*opCheck); ok {
oop = chk.op
}
// switch by operation type and make a separate list
// next time we will see a node with matching type, we will apply only specific ops
for _, k := range oop.Kinds().Split() {
m.byKind[k] = append(m.byKind[k], mp)
}
switch op := oop.(type) {
case ObjectOp:
specific := false
fields, _ := op.Fields()
if f, ok := fields.Get(uast.KeyType); ok && !f.Optional {
if f.Fixed != nil {
typ := *f.Fixed
if typ, ok := typ.(nodes.String); ok {
s := string(typ)
typed[s] = append(typed[s], ordered{ind: i, mp: mp})
specific = true
}
}
}
if !specific {
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
default:
// the type is unknown, thus we should try to apply it to objects and array as well
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
}
m.typedObj = make(map[string][]Mapping, len(typed))
for typ, ord := range typed {
ord = append(ord, typedAny...)
sort.Slice(ord, func(i, j int) bool {
return ord[i].ind < ord[j].ind
})
maps := make([]Mapping, 0, len(ord))
for _, o := range ord {
maps = append(maps, o.mp)
}
m.typedObj[typ] = maps
}
}
func (m mappings) Do(root nodes.Node) (nodes.Node, error) {
var errs []error
st := NewState()
nn, ok := nodes.Apply(root, func(old nodes.Node) (nodes.Node, bool) {
var maps []Mapping
if !optimizeCheck {
maps = m.all
} else {
maps = m.byKind[nodes.KindOf(old)]
switch old := old.(type) {
case nodes.Object:
if typ, ok := old[uast.KeyType].(nodes.String); ok {
if mp, ok := m.typedObj[string(typ)]; ok {
maps = mp
}
}
}
}
n := old
applied := false
for _, mp := range maps {
src, dst := mp.Mapping()
st.Reset()
if ok, err := src.Check(st, n); err != nil {
errs = append(errs, errCheck.Wrap(err))
continue
} else if !ok {
continue
}
applied = true
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
continue
}
n = nn
}
if !applied {
return old, false
}
return n, true
})
err := NewMultiError(errs...)
if err == nil {
err = st.Validate()
}
if ok {
return nn, err
}
return root, err
}
// NewState creates a new state for Ops to work on.
// It stores variables, flags and anything that necessary
// for transformation steps to persist data.
func NewState() *State {
return &State{}
}
// Vars is a set of variables with their values.
type Vars map[string]nodes.Node
// State stores all variables (placeholder values, flags and wny other state) between Check and Construct steps.
type State struct {
vars Vars
unused map[string]struct{}
states map[string][]*State
}
// Reset clears the state and allows to reuse an object.
func (st *State) Reset() {
st.vars = nil
st.unused = nil
st.states = nil
}
// Validate should be called after a successful transformation to check if there are any errors related to unused state.
func (st *State) Validate() error {
if len(st.unused) == 0 {
return nil
}
names := make([]string, 0, len(st.unused))
for name := range st.unused {
names = append(names, name)
}
sort.Strings(names)
return ErrVariableUnused.New(names)
}
// Clone will return a copy of the State. This can be used to apply Check and throw away any variables produced by it.
// To merge a cloned state back use ApplyFrom on a parent state.
func (st *State) Clone() *State {
st2 := NewState()
if len(st.vars) != 0 {
st2.vars = make(Vars)
st2.unused = make(map[string]struct{})
}
for k, v := range st.vars {
st2.vars[k] = v
}
for k := range st.unused {
st2.unused[k] = struct{}{}
}
if len(st.states) != 0 {
st2.states = make(map[string][]*State)
}
for k, v := range st.states {
st2.states[k] = v
}
return st2
}
// ApplyFrom merges a provided state into this state object.
func (st *State) ApplyFrom(st2 *State) {
if len(st2.vars) != 0 && st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
for k, v := range st2.vars {
if _, ok := st.vars[k]; !ok {
st.vars[k] = v
}
}
for k := range st2.unused {
st.unused[k] = struct{}{}
}
if len(st2.states) != 0 && st.states == nil {
st.states = make(map[string][]*State)
}
for k, v := range st2.states {
if _, ok := st.states[k]; !ok {
st.states[k] = v
}
}
}
// GetVar looks up a named variable.
func (st *State) GetVar(name string) (nodes.Node, bool) {
n, ok := st.vars[name]
if ok {
delete(st.unused, name)
}
return n, ok
}
// MustGetVar looks up a named variable and returns ErrVariableNotDefined in case it does not exists.
func (st *State) MustGetVar(name string) (nodes.Node, error) {
n, ok := st.GetVar(name)
if !ok {
return nil, ErrVariableNotDefined.New(name)
}
return n, nil
}
// VarsPtrs is a set of variable pointers.
type VarsPtrs map[string]nodes.NodePtr
// MustGetVars is like MustGetVar but fetches multiple variables in one operation.
func (st *State) MustGetVars(vars VarsPtrs) error {
for name, dst := range vars {
n, ok := st.GetVar(name)
if !ok {
return ErrVariableNotDefined.New(name)
}
if err := dst.SetNode(n); err != nil {
return err
}
}
return nil
}
// SetVar sets a named variable. It will return ErrVariableRedeclared if a variable with the same name is already set.
// It will ignore the operation if variable already exists and has the same value (nodes.Value).
func (st *State) SetVar(name string, val nodes.Node) error {
cur, ok := st.vars[name]
if !ok {
// not declared
if st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
st.vars[name] = val
st.unused[name] = struct{}{}
return nil
}
if nodes.Equal(cur, val) {
// already declared, and the same value is already in the map
return nil
}
return ErrVariableRedeclared.New(name, cur, val)
}
// SetVars is like SetVar but sets multiple variables in one operation.
func (st *State) SetVars(vars Vars) error {
for k, v := range vars {
if err := st.SetVar(k, v); err != nil {
return err
}
}
return nil
}
// GetStateVar returns a stored sub-state from a named variable.
func (st *State) GetStateVar(name string) ([]*State, bool) {
n, ok := st.states[name]
return n, ok
}
// SetStateVar sets a sub-state variable. It returns ErrVariableRedeclared if the variable with this name already exists.
func (st *State) SetStateVar(name string, sub []*State) error {
cur, ok := st.states[name]
if ok {
return ErrVariableRedeclared.New(name, cur, sub)
}
if st.states == nil {
st.states = make(map[string][]*State)
}
st.states[name] = sub
return nil
}
// DefaultNamespace is a transform that sets a specified namespace for predicates and values that doesn't have a namespace.
func DefaultNamespace(ns string) Transformer {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
tp, ok := obj[uast.KeyType].(nodes.String)
if !ok {
return n, false, nil
}
if strings.Contains(string(tp), ":") {
return n, false, nil
}
obj = obj.CloneObject()
obj[uast.KeyType] = nodes.String(ns + ":" + string(tp))
return obj, true, nil
})
}
| {
errs = append(errs, errCheck.Wrap(err))
return n, false
} | conditional_block |
transformer.go | package transformer
import (
"sort"
"strings"
"github.com/bblfsh/sdk/v3/uast"
"github.com/bblfsh/sdk/v3/uast/nodes"
)
const optimizeCheck = true
// Transformer is an interface for transformations that operates on AST trees.
// An implementation is responsible for walking the tree and executing transformation on each AST node.
type Transformer interface {
Do(root nodes.Node) (nodes.Node, error)
}
// CodeTransformer is a special case of Transformer that needs an original source code to operate.
type CodeTransformer interface {
OnCode(code string) Transformer
}
// Sel is an operation that can verify if a specific node matches a set of constraints or not.
type Sel interface {
// Kinds returns a mask of all nodes kinds that this operation might match.
Kinds() nodes.Kind
// Check will verify constraints for a single node and returns true if an objects matches them.
// It can also populate the State with variables that can be used later to Construct a different object from the State.
Check(st *State, n nodes.Node) (bool, error)
}
// Mod is an operation that can reconstruct an AST node from a given State.
type Mod interface {
// Construct will use variables stored in State to reconstruct an AST node.
// Node that is provided as an argument may be used as a base for reconstruction.
Construct(st *State, n nodes.Node) (nodes.Node, error)
}
// Op is a generic AST transformation step that describes a shape of an AST tree.
// It can be used to either check the constraints for a specific node and populate state, or to reconstruct an AST shape
// from a the same state (probably produced by another Op).
type Op interface {
Sel
Mod
}
// Transformers appends all provided transformer slices into single one.
func Transformers(arr ...[]Transformer) []Transformer {
var out []Transformer
for _, a := range arr {
out = append(out, a...)
}
return out
}
var _ Transformer = (TransformFunc)(nil)
// TransformFunc is a function that will be applied to each AST node to transform the tree.
// It returns a new AST and true if tree was changed, or an old node and false if no modifications were done.
// The the tree will be traversed automatically and the callback will be called for each node.
type TransformFunc func(n nodes.Node) (nodes.Node, bool, error)
// Do runs a transformation function for each AST node.
func (f TransformFunc) Do(n nodes.Node) (nodes.Node, error) {
var last error
nn, ok := nodes.Apply(n, func(n nodes.Node) (nodes.Node, bool) {
nn, ok, err := f(n)
if err != nil {
last = err
return n, false
} else if !ok {
return n, false
}
return nn, ok
})
if ok {
return nn, last
}
return n, last
}
var _ Transformer = (TransformObjFunc)(nil)
// TransformObjFunc is like TransformFunc, but only matches Object nodes.
type TransformObjFunc func(n nodes.Object) (nodes.Object, bool, error)
// Func converts this TransformObjFunc to a regular TransformFunc by skipping all non-object nodes.
func (f TransformObjFunc) Func() TransformFunc {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
nn, ok, err := f(obj)
if err != nil {
return n, false, err
} else if !ok {
return n, false, nil
}
return nn, ok, nil
})
}
// Do runs a transformation function for each AST node.
func (f TransformObjFunc) Do(n nodes.Node) (nodes.Node, error) {
return f.Func().Do(n)
}
// Map creates a two-way mapping between two transform operations.
// The first operation will be used to check constraints for each node and store state, while the second one will use
// the state to construct a new tree.
func Map(src, dst Op) Mapping {
return mapping{src: src, dst: dst}
}
func MapObj(src, dst ObjectOp) ObjMapping {
return objMapping{src: src, dst: dst}
}
func MapPart(vr string, m ObjMapping) ObjMapping {
src, dst := m.ObjMapping()
_, sok := src.Fields()
_, dok := dst.Fields()
if !sok && !dok {
// both contain partial op, ignore current label
return MapObj(src, dst)
} else if sok != dok {
panic("inconsistent use of Part")
}
return MapObj(Part(vr, src), Part(vr, dst))
}
func Identity(op Op) Mapping {
return Map(op, op)
}
type Mapping interface {
Mapping() (src, dst Op)
}
type ObjMapping interface {
Mapping
ObjMapping() (src, dst ObjectOp)
}
type MappingOp interface {
Op
Mapping
}
type mapping struct {
src, dst Op
}
func (m mapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
type objMapping struct {
src, dst ObjectOp
}
func (m objMapping) Mapping() (src, dst Op) {
return m.src, m.dst
}
func (m objMapping) ObjMapping() (src, dst ObjectOp) {
return m.src, m.dst
}
// Reverse changes a transformation direction, allowing to construct the source tree.
func Reverse(m Mapping) Mapping {
src, dst := m.Mapping()
return Map(dst, src)
}
func (m mapping) apply(root nodes.Node) (nodes.Node, error) {
src, dst := m.src, m.dst
var errs []error
_, objOp := src.(ObjectOp)
_, arrOp := src.(ArrayOp)
st := NewState()
nn, ok := nodes.Apply(root, func(n nodes.Node) (nodes.Node, bool) {
if n != nil {
if objOp {
if _, ok := n.(nodes.Object); !ok {
return n, false
}
} else if arrOp {
if _, ok := n.(nodes.Array); !ok {
return n, false
}
}
}
st.Reset()
if ok, err := src.Check(st, n); err != nil {
errs = append(errs, errCheck.Wrap(err))
return n, false
} else if !ok {
return n, false
}
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
return n, false
}
return nn, true
})
err := NewMultiError(errs...)
if ok {
return nn, err
}
return root, err
}
// Mappings takes multiple mappings and optimizes the process of applying them as a single transformation.
func Mappings(maps ...Mapping) Transformer {
if len(maps) == 0 {
return mappings{}
}
mp := mappings{
all: maps,
}
if optimizeCheck {
mp.byKind = make(map[nodes.Kind][]Mapping)
mp.index()
}
return mp
}
type mappings struct {
all []Mapping
// indexed mappings
byKind map[nodes.Kind][]Mapping // mappings applied to specific node kind
typedObj map[string][]Mapping // mappings for objects with specific type
typedAny []Mapping // mappings for any typed object (operations that does not mention the type)
}
func (m *mappings) index() {
precompile := func(m Mapping) Mapping {
return Map(m.Mapping())
}
type ordered struct {
ind int
mp Mapping
}
var typedAny []ordered
typed := make(map[string][]ordered)
for i, mp := range m.all {
// pre-compile object operations (sort fields for unordered ops, etc)
mp = precompile(mp)
oop, _ := mp.Mapping()
if chk, ok := oop.(*opCheck); ok {
oop = chk.op
}
// switch by operation type and make a separate list
// next time we will see a node with matching type, we will apply only specific ops
for _, k := range oop.Kinds().Split() {
m.byKind[k] = append(m.byKind[k], mp)
}
switch op := oop.(type) {
case ObjectOp:
specific := false
fields, _ := op.Fields()
if f, ok := fields.Get(uast.KeyType); ok && !f.Optional {
if f.Fixed != nil {
typ := *f.Fixed
if typ, ok := typ.(nodes.String); ok {
s := string(typ)
typed[s] = append(typed[s], ordered{ind: i, mp: mp})
specific = true
}
}
}
if !specific {
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
default:
// the type is unknown, thus we should try to apply it to objects and array as well
typedAny = append(typedAny, ordered{ind: i, mp: mp})
}
}
m.typedObj = make(map[string][]Mapping, len(typed))
for typ, ord := range typed {
ord = append(ord, typedAny...)
sort.Slice(ord, func(i, j int) bool {
return ord[i].ind < ord[j].ind
})
maps := make([]Mapping, 0, len(ord))
for _, o := range ord {
maps = append(maps, o.mp)
}
m.typedObj[typ] = maps
}
}
func (m mappings) Do(root nodes.Node) (nodes.Node, error) {
var errs []error
st := NewState()
nn, ok := nodes.Apply(root, func(old nodes.Node) (nodes.Node, bool) {
var maps []Mapping
if !optimizeCheck {
maps = m.all
} else {
maps = m.byKind[nodes.KindOf(old)]
switch old := old.(type) {
case nodes.Object:
if typ, ok := old[uast.KeyType].(nodes.String); ok {
if mp, ok := m.typedObj[string(typ)]; ok {
maps = mp
}
}
}
}
n := old
applied := false
for _, mp := range maps {
src, dst := mp.Mapping()
st.Reset()
if ok, err := src.Check(st, n); err != nil {
errs = append(errs, errCheck.Wrap(err))
continue
} else if !ok {
continue
}
applied = true
nn, err := dst.Construct(st, nil)
if err != nil {
errs = append(errs, errConstruct.Wrap(err))
continue
}
n = nn
}
if !applied {
return old, false
}
return n, true
})
err := NewMultiError(errs...)
if err == nil {
err = st.Validate()
}
if ok {
return nn, err
}
return root, err
}
// NewState creates a new state for Ops to work on.
// It stores variables, flags and anything that necessary
// for transformation steps to persist data.
func NewState() *State {
return &State{}
}
// Vars is a set of variables with their values.
type Vars map[string]nodes.Node
// State stores all variables (placeholder values, flags and wny other state) between Check and Construct steps.
type State struct {
vars Vars
unused map[string]struct{}
states map[string][]*State
}
// Reset clears the state and allows to reuse an object.
func (st *State) | () {
st.vars = nil
st.unused = nil
st.states = nil
}
// Validate should be called after a successful transformation to check if there are any errors related to unused state.
func (st *State) Validate() error {
if len(st.unused) == 0 {
return nil
}
names := make([]string, 0, len(st.unused))
for name := range st.unused {
names = append(names, name)
}
sort.Strings(names)
return ErrVariableUnused.New(names)
}
// Clone will return a copy of the State. This can be used to apply Check and throw away any variables produced by it.
// To merge a cloned state back use ApplyFrom on a parent state.
func (st *State) Clone() *State {
st2 := NewState()
if len(st.vars) != 0 {
st2.vars = make(Vars)
st2.unused = make(map[string]struct{})
}
for k, v := range st.vars {
st2.vars[k] = v
}
for k := range st.unused {
st2.unused[k] = struct{}{}
}
if len(st.states) != 0 {
st2.states = make(map[string][]*State)
}
for k, v := range st.states {
st2.states[k] = v
}
return st2
}
// ApplyFrom merges a provided state into this state object.
func (st *State) ApplyFrom(st2 *State) {
if len(st2.vars) != 0 && st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
for k, v := range st2.vars {
if _, ok := st.vars[k]; !ok {
st.vars[k] = v
}
}
for k := range st2.unused {
st.unused[k] = struct{}{}
}
if len(st2.states) != 0 && st.states == nil {
st.states = make(map[string][]*State)
}
for k, v := range st2.states {
if _, ok := st.states[k]; !ok {
st.states[k] = v
}
}
}
// GetVar looks up a named variable.
func (st *State) GetVar(name string) (nodes.Node, bool) {
n, ok := st.vars[name]
if ok {
delete(st.unused, name)
}
return n, ok
}
// MustGetVar looks up a named variable and returns ErrVariableNotDefined in case it does not exists.
func (st *State) MustGetVar(name string) (nodes.Node, error) {
n, ok := st.GetVar(name)
if !ok {
return nil, ErrVariableNotDefined.New(name)
}
return n, nil
}
// VarsPtrs is a set of variable pointers.
type VarsPtrs map[string]nodes.NodePtr
// MustGetVars is like MustGetVar but fetches multiple variables in one operation.
func (st *State) MustGetVars(vars VarsPtrs) error {
for name, dst := range vars {
n, ok := st.GetVar(name)
if !ok {
return ErrVariableNotDefined.New(name)
}
if err := dst.SetNode(n); err != nil {
return err
}
}
return nil
}
// SetVar sets a named variable. It will return ErrVariableRedeclared if a variable with the same name is already set.
// It will ignore the operation if variable already exists and has the same value (nodes.Value).
func (st *State) SetVar(name string, val nodes.Node) error {
cur, ok := st.vars[name]
if !ok {
// not declared
if st.vars == nil {
st.vars = make(Vars)
st.unused = make(map[string]struct{})
}
st.vars[name] = val
st.unused[name] = struct{}{}
return nil
}
if nodes.Equal(cur, val) {
// already declared, and the same value is already in the map
return nil
}
return ErrVariableRedeclared.New(name, cur, val)
}
// SetVars is like SetVar but sets multiple variables in one operation.
func (st *State) SetVars(vars Vars) error {
for k, v := range vars {
if err := st.SetVar(k, v); err != nil {
return err
}
}
return nil
}
// GetStateVar returns a stored sub-state from a named variable.
func (st *State) GetStateVar(name string) ([]*State, bool) {
n, ok := st.states[name]
return n, ok
}
// SetStateVar sets a sub-state variable. It returns ErrVariableRedeclared if the variable with this name already exists.
func (st *State) SetStateVar(name string, sub []*State) error {
cur, ok := st.states[name]
if ok {
return ErrVariableRedeclared.New(name, cur, sub)
}
if st.states == nil {
st.states = make(map[string][]*State)
}
st.states[name] = sub
return nil
}
// DefaultNamespace is a transform that sets a specified namespace for predicates and values that doesn't have a namespace.
func DefaultNamespace(ns string) Transformer {
return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
obj, ok := n.(nodes.Object)
if !ok {
return n, false, nil
}
tp, ok := obj[uast.KeyType].(nodes.String)
if !ok {
return n, false, nil
}
if strings.Contains(string(tp), ":") {
return n, false, nil
}
obj = obj.CloneObject()
obj[uast.KeyType] = nodes.String(ns + ":" + string(tp))
return obj, true, nil
})
}
| Reset | identifier_name |
base-chart.component.ts | import { AfterViewInit, Component, HostListener, OnDestroy } from '@angular/core';
import * as Chart from 'chart.js';
import 'rxjs-compat/add/observable/of';
import {ChartDataSets} from 'chart.js';
import { VisState } from '../_ngrx/vis.state';
import { select, Store } from '@ngrx/store';
import { IXAxis, TEMP, FIELD, ANGLE, IYAxis, CRIT_CURR_WIDTH, CRIT_CURR, N_VALUE } from '../_shared/interfaces/axis';
import { takeUntil } from 'rxjs/operators';
import { Subject } from 'rxjs';
import { ActivatedRoute } from '@angular/router';
import {
RequestInitialGraphDataAction,
SetHighContrastAction,
SetXScaleAction,
SetYScaleAction,
ChangeConstantValueAction,
ChangeDependencyAction,
ChangeConstantAction,
SetYAxisAction,
SetGraphNotLoadedAction,
SetAlternatePointsAction,
SetLineTensionAction,
SetPointSizeAction
} from '../_ngrx/vis.actions';
@Component({
selector: 'vis-base-chart',
templateUrl: './base-chart.component.html',
styleUrls: ['./base-chart.component.scss']
})
export class BaseChartComponent implements AfterViewInit, OnDestroy {
public id: number;
public windowWidth: number;
public datasets: ChartDataSets[];
public title: string;
public xAxisLabel: string;
public yAxisLabel: string;
public logScaleY = false;
public logScaleX = false;
public highContrastMode = false;
public alternatePointMode = false;
public Yaxis: IYAxis;
public dependency: IXAxis;
private DEPENDENCY_VALUES = [TEMP, FIELD, ANGLE];
public lineTension = false;
public pointSize = 3;
public firstParam: string;
public secondParam: string;
public thirdParam: string;
public fourthParam: string;
public fifthParam: string
private rValues = [178, 44, 60, 203, 88, 84, 136, 96, 158, 166, 77, 108, 182, 116];
private gValues = [51, 50, 55, 91, 162, 73, 91, 116, 149, 87, 160, 131, 197, 200];
private bValues = [76, 63, 186, 200, 97, 118, 186, 115, 64, 107, 171, 199, 107, 138];
private pointStyles = [
'circle', 'rect', 'triangle', 'cross', 'crossRot', 'rectRot', 'star'
];
private chart: Chart;
private tickStyle = {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
};
private onDestroy: Subject<boolean> = new Subject<boolean>();
constructor(private _store: Store<VisState>, private _route: ActivatedRoute) {
this.id = this._route.snapshot.params['id'];
}
ngOnInit() {
this.firstParam = this._route.snapshot.queryParamMap.get('indepscale');
this.secondParam= this._route.snapshot.queryParamMap.get('depscale');
this.thirdParam= this._route.snapshot.queryParamMap.get('dependency');
this.fourthParam= this._route.snapshot.queryParamMap.get('constant');
this.fifthParam = this._route.snapshot.queryParamMap.get('yaxis');
}
ngOnDestroy() { | this.onDestroy.next(true);
this.onDestroy.unsubscribe();
}
@HostListener('window:resize')
onResize() {
this.windowWidth = window.innerWidth;
const currentPosition = this.chart.config.options.legend.position;
const newPosition = this.getLegendPosition();
if (currentPosition !== newPosition) {
this.updateChartWidth();
}
}
ngAfterViewInit(): void {
this.windowWidth = window.innerWidth;
this._store.pipe(select('vis'), takeUntil(this.onDestroy))
.subscribe((state: VisState) => {
const {graphTitle, dependency, yAxis, highContrastMode, logScaleX, logScaleY, alternatePointsMode, lineTension, pointSize} = state;
this.highContrastMode = highContrastMode;
this.alternatePointMode = alternatePointsMode;
this.datasets = this.mapExperimentsToChartData(state);
this.title = graphTitle;
this.Yaxis = yAxis;
this.xAxisLabel = dependency.axisName + ' ' + dependency.variable + ' ' + dependency.bracketedUnit;
this.yAxisLabel = yAxis.axisName + ' ' + yAxis.variable + ' ' + yAxis.bracketedUnit;
this.logScaleX = logScaleX;
this.logScaleY = logScaleY;
this.dependency = dependency;
this.lineTension = lineTension;
this.pointSize = pointSize;
this.updateChartAndMetaData();
});
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
this.chart = new Chart(ctx, {
type: 'line',
data: { datasets: this.datasets},
plugins: [{ beforeDraw: this.drawOnCanvas }],
options: {
responsive: true,
maintainAspectRatio: false,
title: { display: true, position: 'top', text: this.title, fontSize: 17},
legend: { position: this.getLegendPosition(), reverse: true,
labels: {
usePointStyle: true
}
},
layout: { padding: { top: 0 } },
hover: { mode: 'nearest' },
elements: {
line: {
tension: this.lineTensionBoolean()
},
point: {
radius: this.pointSize,
hoverRadius: this.pointSize + 1
}
},
tooltips: {
callbacks: {
title: ( () => ''),
beforeLabel: (item, data) => this.formatTopOfToolTip(item, data),
label: (tooltipItems) => this.formatBottomOfToolTip(tooltipItems),
}
},
scales: {
display: true,
yAxes: [{
type: 'linear',
ticks: this.tickStyle,
scaleLabel: { labelString: this.yAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
xAxes: [{
type: 'linear',
ticks: this.tickOptions(),
scaleLabel: { labelString: this.xAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
}
}
});
this.updateChartAndMetaData();
// Triggers default chart update if query parameters are present in url - will be updated to evaluate contents of paramMap array
// if((this.firstParam || this.secondParam) != null){
// this.updateChartDefaults();
// this.updateChartAndMetaData();
// }
this.updateChartDefaults();
}
public setCustomDependency(dep: IXAxis) {
this._store.dispatch(new ChangeDependencyAction(dep));
}
public setCustomConstant(constant: IXAxis) {
this._store.dispatch(new ChangeConstantAction(constant));
}
public setCustomYScale(yscale: boolean) {
// console.log("setYScale: " + !this.useLogScaleX)
this._store.dispatch(new SetYScaleAction(yscale));
}
public setCustomXScale(xscale: boolean) {
// console.log("setXScale: " + !this.useLogScaleX)
this._store.dispatch(new SetXScaleAction(xscale));
}
public setCustomYAxis(yaxis: IYAxis) {
this._store.dispatch(new SetYAxisAction(yaxis));
}
public updateChartDefaults(){
if(this.firstParam === "logarithmic"){
this.setCustomXScale(true);
}
else if(this.firstParam === "linear"){
this.setCustomXScale(false);
}
else{
console.log("invalid logScaleY arg")
}
if(this.secondParam === "logarithmic"){
this.setCustomYScale(true);
}
else if(this.secondParam === "linear"){
this.setCustomYScale(false);
}
else{
console.log("invalid logScaleX arg")
}
if(this.thirdParam === "temp"){
this.setCustomDependency(TEMP);
}
else if(this.thirdParam === "angle"){
this.setCustomDependency(ANGLE);
}
else if(this.thirdParam === "field"){
this.setCustomDependency(FIELD);
}
else{
console.log("invalid dependency arg")
}
if(this.fourthParam === "temp"){
this.setCustomConstant(TEMP);
}
else if(this.fourthParam === "angle"){
this.setCustomConstant(ANGLE);
}
else if(this.fourthParam === "field"){
this.setCustomConstant(FIELD);
}
else{
console.log("invalid constant arg")
}
if(this.fifthParam === "ic"){
this.setCustomYAxis(CRIT_CURR)
}
else if(this.fifthParam === "icw"){
this.setCustomYAxis(CRIT_CURR_WIDTH)
}
else if(this.fifthParam === "nvalue"){
this.setCustomYAxis(N_VALUE);
}
else{
console.log("invalid yaxis arg")
}
}
public getLegendPosition() {
return this.windowWidth >= 800 ? 'right' : 'bottom';
}
public updateChartAndMetaData() {
if (this.chart) {
this.switchAxisScale(this.logScaleY, 'y');
this.switchAxisScale(this.logScaleX, 'x');
this.chart.config.options.title.text = this.title;
this.chart.config.options.scales.xAxes[0].scaleLabel.labelString = this.xAxisLabel;
this.chart.config.options.scales.yAxes[0].scaleLabel.labelString = this.yAxisLabel;
this.chart.config.options.elements.line.tension = this.lineTensionBoolean();
this.chart.config.options.elements.point.radius = this.pointSize;
this.chart.config.options.elements.point.hoverRadius = this.pointSize + 1;
this.chart.config.options.legend.position = this.getLegendPosition();
this.updateChart();
}
}
public updateChart() {
if (this.chart) {
this.chart.data.datasets = this.datasets;
this.chart.update({
duration:0});
}
}
public updateChartWidth() {
this.chart.config.options.legend.position = this.getLegendPosition();
this.chart.update({
duration:0});
}
public tickOptions() {
if (this.dependency.raw === 'angle') {
return {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
stepSize: 30
};
} else {
return this.tickStyle;
}
}
public lineTensionBoolean() {
return this.lineTension ? 0 : 0.5;
}
public mapExperimentsToChartData(state: VisState) {
const { experiments, dependency, yAxis } = state;
console.log("mapEx state dependency: " + state.dependency.axisName);
console.log("mapEx state constant: " + state.constant.dependence)
const displayedLineType = this.displayedLineType(state.dependency, state.constant);
const numExperiments = experiments.length;
let newData = [];
let pointStylesIndex = 0;
let colorIndex = 0;
experiments.forEach((experiment, index) => {
const xVals = experiment.rows.map(point => point[dependency.raw]);
const yVals = experiment.rows.map(point => point[yAxis.rowKey]);
const chartJSPoints = this.mapXandYPoints(xVals, yVals);
const b = this.highContrastMode ? this.bValues[colorIndex] : Math.floor((255 / numExperiments) * index);
const g = this.highContrastMode ? this.gValues[colorIndex] : Math.floor((255 / numExperiments) * index / 2);
const r = this.highContrastMode ? this.rValues[colorIndex] : 0;
const newChartLine = {
label: experiment[displayedLineType.rowKey] + displayedLineType.unit,
backgroundColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 0.5 + ')',
borderColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 1 + ')',
fill: false,
data: chartJSPoints,
pointStyle: this.alternatePointMode ? this.pointStyles[pointStylesIndex] : 'circle',
};
pointStylesIndex++;
colorIndex ++;
if (colorIndex >= this.rValues.length) {
colorIndex = 0;
}
if (pointStylesIndex >= this.pointStyles.length) {
pointStylesIndex = 0;
}
newData = [...newData, newChartLine];
});
return newData;
}
public displayedLineType(dependency: IXAxis, constant: IXAxis): IXAxis {
const displayed = this.DEPENDENCY_VALUES.filter(val => val !== dependency && val !== constant);
return displayed[0];
}
public mapXandYPoints(xVals, yVals) {
const xyPoints = xVals.map( (e, i) => [e, yVals[i]] ).map( (xs) => {
return { x: xs[0], y: xs[1] };
} );
return this.orderPoints(xyPoints);
}
public orderPoints(array) {
return array.sort((a, b) => (a.x > b.x) ? 1 : ((b.x > a.x) ? -1 : 0));
}
private switchAxisScale(log: boolean, axis: string) {
if (!this.chart) { return; }
if (log) {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'logarithmic';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.autoSkip = true;
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value, index, arr) => {
if (index === arr.length - 1 || index === 0) {
return value;
} else if (Math.log10(value) % 1 === 0) {
return (value).toLocaleString();
} else {
return '';
}
};
} else {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'linear';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value) => value;
this.chart.config.options.scales.xAxes[0].ticks = this.tickOptions();
}
}
public exportPNG() {
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
return ctx.canvas.toDataURL('image/png');
}
public selectAll() {
// weird quirk of chartjs, visible lines are null hidden
this.updateLineVisibility(null);
}
public deselectAll() {
this.updateLineVisibility(true);
}
public updateLineVisibility(hidden) {
this.chart.data.datasets.forEach( (dataset: any) => {
if (dataset) {
const toUpdate = dataset._meta[Object.keys(dataset._meta)[0]];
if (toUpdate) {
toUpdate.hidden = hidden;
}
}
});
this.chart.update();
}
private drawOnCanvas(chartInstance) {
const chartCtx = chartInstance.chart.ctx;
chartCtx.fillStyle = 'white';
chartCtx.fillRect(0, 0, chartInstance.chart.width, chartInstance.chart.height);
}
private formatTopOfToolTip(item, data) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /\(\d\d°|\d\dK|\d\dT|\dT|\d°|\dK/;
const mainLabel = data.datasets[item.datasetIndex].label;
if (mainLabel.toString().endsWith('°')) {
if (reg.exec(this.xAxisLabel)[1] === 'T') {
return `${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}, ${mainLabel}`;
} else if (mainLabel.toString().endsWith('K')) {
if (reg.exec(this.xAxisLabel)[1] === '°') {
return `${mainLabel}, ${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}`;
} else if (mainLabel.toString().endsWith('T')) {
if (reg.exec(this.xAxisLabel)[1] === 'K') {
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
return `${reg2.exec(this.title)}, ${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
private formatBottomOfToolTip(tooltipItems) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /𝘐c\/𝘸|𝘐c|𝘯-value/;
let yUnits = '';
if (reg.exec(this.yAxisLabel) !== null) {
yUnits = reg.exec(this.yAxisLabel)[1];
}
const yLabel = tooltipItems.yLabel;
return `${reg2.exec(this.yAxisLabel)} = ${yLabel} ${yUnits}`;
}
} | random_line_split | |
base-chart.component.ts | import { AfterViewInit, Component, HostListener, OnDestroy } from '@angular/core';
import * as Chart from 'chart.js';
import 'rxjs-compat/add/observable/of';
import {ChartDataSets} from 'chart.js';
import { VisState } from '../_ngrx/vis.state';
import { select, Store } from '@ngrx/store';
import { IXAxis, TEMP, FIELD, ANGLE, IYAxis, CRIT_CURR_WIDTH, CRIT_CURR, N_VALUE } from '../_shared/interfaces/axis';
import { takeUntil } from 'rxjs/operators';
import { Subject } from 'rxjs';
import { ActivatedRoute } from '@angular/router';
import {
RequestInitialGraphDataAction,
SetHighContrastAction,
SetXScaleAction,
SetYScaleAction,
ChangeConstantValueAction,
ChangeDependencyAction,
ChangeConstantAction,
SetYAxisAction,
SetGraphNotLoadedAction,
SetAlternatePointsAction,
SetLineTensionAction,
SetPointSizeAction
} from '../_ngrx/vis.actions';
@Component({
selector: 'vis-base-chart',
templateUrl: './base-chart.component.html',
styleUrls: ['./base-chart.component.scss']
})
export class BaseChartComponent implements AfterViewInit, OnDestroy {
public id: number;
public windowWidth: number;
public datasets: ChartDataSets[];
public title: string;
public xAxisLabel: string;
public yAxisLabel: string;
public logScaleY = false;
public logScaleX = false;
public highContrastMode = false;
public alternatePointMode = false;
public Yaxis: IYAxis;
public dependency: IXAxis;
private DEPENDENCY_VALUES = [TEMP, FIELD, ANGLE];
public lineTension = false;
public pointSize = 3;
public firstParam: string;
public secondParam: string;
public thirdParam: string;
public fourthParam: string;
public fifthParam: string
private rValues = [178, 44, 60, 203, 88, 84, 136, 96, 158, 166, 77, 108, 182, 116];
private gValues = [51, 50, 55, 91, 162, 73, 91, 116, 149, 87, 160, 131, 197, 200];
private bValues = [76, 63, 186, 200, 97, 118, 186, 115, 64, 107, 171, 199, 107, 138];
private pointStyles = [
'circle', 'rect', 'triangle', 'cross', 'crossRot', 'rectRot', 'star'
];
private chart: Chart;
private tickStyle = {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
};
private onDestroy: Subject<boolean> = new Subject<boolean>();
constructor(private _store: Store<VisState>, private _route: ActivatedRoute) {
this.id = this._route.snapshot.params['id'];
}
ngOnInit() {
this.firstParam = this._route.snapshot.queryParamMap.get('indepscale');
this.secondParam= this._route.snapshot.queryParamMap.get('depscale');
this.thirdParam= this._route.snapshot.queryParamMap.get('dependency');
this.fourthParam= this._route.snapshot.queryParamMap.get('constant');
this.fifthParam = this._route.snapshot.queryParamMap.get('yaxis');
}
ngOnDestroy() {
this.onDestroy.next(true);
this.onDestroy.unsubscribe();
}
@HostListener('window:resize')
onResize() {
this.windowWidth = window.innerWidth;
const currentPosition = this.chart.config.options.legend.position;
const newPosition = this.getLegendPosition();
if (currentPosition !== newPosition) {
this.updateChartWidth();
}
}
ngAfterViewInit(): void {
this.windowWidth = window.innerWidth;
this._store.pipe(select('vis'), takeUntil(this.onDestroy))
.subscribe((state: VisState) => {
const {graphTitle, dependency, yAxis, highContrastMode, logScaleX, logScaleY, alternatePointsMode, lineTension, pointSize} = state;
this.highContrastMode = highContrastMode;
this.alternatePointMode = alternatePointsMode;
this.datasets = this.mapExperimentsToChartData(state);
this.title = graphTitle;
this.Yaxis = yAxis;
this.xAxisLabel = dependency.axisName + ' ' + dependency.variable + ' ' + dependency.bracketedUnit;
this.yAxisLabel = yAxis.axisName + ' ' + yAxis.variable + ' ' + yAxis.bracketedUnit;
this.logScaleX = logScaleX;
this.logScaleY = logScaleY;
this.dependency = dependency;
this.lineTension = lineTension;
this.pointSize = pointSize;
this.updateChartAndMetaData();
});
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
this.chart = new Chart(ctx, {
type: 'line',
data: { datasets: this.datasets},
plugins: [{ beforeDraw: this.drawOnCanvas }],
options: {
responsive: true,
maintainAspectRatio: false,
title: { display: true, position: 'top', text: this.title, fontSize: 17},
legend: { position: this.getLegendPosition(), reverse: true,
labels: {
usePointStyle: true
}
},
layout: { padding: { top: 0 } },
hover: { mode: 'nearest' },
elements: {
line: {
tension: this.lineTensionBoolean()
},
point: {
radius: this.pointSize,
hoverRadius: this.pointSize + 1
}
},
tooltips: {
callbacks: {
title: ( () => ''),
beforeLabel: (item, data) => this.formatTopOfToolTip(item, data),
label: (tooltipItems) => this.formatBottomOfToolTip(tooltipItems),
}
},
scales: {
display: true,
yAxes: [{
type: 'linear',
ticks: this.tickStyle,
scaleLabel: { labelString: this.yAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
xAxes: [{
type: 'linear',
ticks: this.tickOptions(),
scaleLabel: { labelString: this.xAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
}
}
});
this.updateChartAndMetaData();
// Triggers default chart update if query parameters are present in url - will be updated to evaluate contents of paramMap array
// if((this.firstParam || this.secondParam) != null){
// this.updateChartDefaults();
// this.updateChartAndMetaData();
// }
this.updateChartDefaults();
}
public setCustomDependency(dep: IXAxis) {
this._store.dispatch(new ChangeDependencyAction(dep));
}
public setCustomConstant(constant: IXAxis) {
this._store.dispatch(new ChangeConstantAction(constant));
}
public setCustomYScale(yscale: boolean) {
// console.log("setYScale: " + !this.useLogScaleX)
this._store.dispatch(new SetYScaleAction(yscale));
}
public setCustomXScale(xscale: boolean) {
// console.log("setXScale: " + !this.useLogScaleX)
this._store.dispatch(new SetXScaleAction(xscale));
}
public setCustomYAxis(yaxis: IYAxis) {
this._store.dispatch(new SetYAxisAction(yaxis));
}
public updateChartDefaults(){
if(this.firstParam === "logarithmic"){
this.setCustomXScale(true);
}
else if(this.firstParam === "linear"){
this.setCustomXScale(false);
}
else{
console.log("invalid logScaleY arg")
}
if(this.secondParam === "logarithmic"){
this.setCustomYScale(true);
}
else if(this.secondParam === "linear"){
this.setCustomYScale(false);
}
else{
console.log("invalid logScaleX arg")
}
if(this.thirdParam === "temp"){
this.setCustomDependency(TEMP);
}
else if(this.thirdParam === "angle"){
this.setCustomDependency(ANGLE);
}
else if(this.thirdParam === "field"){
this.setCustomDependency(FIELD);
}
else{
console.log("invalid dependency arg")
}
if(this.fourthParam === "temp"){
this.setCustomConstant(TEMP);
}
else if(this.fourthParam === "angle"){
this.setCustomConstant(ANGLE);
}
else if(this.fourthParam === "field"){
this.setCustomConstant(FIELD);
}
else{
console.log("invalid constant arg")
}
if(this.fifthParam === "ic"){
this.setCustomYAxis(CRIT_CURR)
}
else if(this.fifthParam === "icw"){
this.setCustomYAxis(CRIT_CURR_WIDTH)
}
else if(this.fifthParam === "nvalue"){
this.setCustomYAxis(N_VALUE);
}
else{
console.log("invalid yaxis arg")
}
}
public getLegendPosition() {
return this.windowWidth >= 800 ? 'right' : 'bottom';
}
public updateChartAndMetaData() {
if (this.chart) {
this.switchAxisScale(this.logScaleY, 'y');
this.switchAxisScale(this.logScaleX, 'x');
this.chart.config.options.title.text = this.title;
this.chart.config.options.scales.xAxes[0].scaleLabel.labelString = this.xAxisLabel;
this.chart.config.options.scales.yAxes[0].scaleLabel.labelString = this.yAxisLabel;
this.chart.config.options.elements.line.tension = this.lineTensionBoolean();
this.chart.config.options.elements.point.radius = this.pointSize;
this.chart.config.options.elements.point.hoverRadius = this.pointSize + 1;
this.chart.config.options.legend.position = this.getLegendPosition();
this.updateChart();
}
}
public updateChart() {
if (this.chart) {
this.chart.data.datasets = this.datasets;
this.chart.update({
duration:0});
}
}
public updateChartWidth() {
this.chart.config.options.legend.position = this.getLegendPosition();
this.chart.update({
duration:0});
}
public tickOptions() {
if (this.dependency.raw === 'angle') {
return {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
stepSize: 30
};
} else {
return this.tickStyle;
}
}
public lineTensionBoolean() {
return this.lineTension ? 0 : 0.5;
}
public mapExperimentsToChartData(state: VisState) {
const { experiments, dependency, yAxis } = state;
console.log("mapEx state dependency: " + state.dependency.axisName);
console.log("mapEx state constant: " + state.constant.dependence)
const displayedLineType = this.displayedLineType(state.dependency, state.constant);
const numExperiments = experiments.length;
let newData = [];
let pointStylesIndex = 0;
let colorIndex = 0;
experiments.forEach((experiment, index) => {
const xVals = experiment.rows.map(point => point[dependency.raw]);
const yVals = experiment.rows.map(point => point[yAxis.rowKey]);
const chartJSPoints = this.mapXandYPoints(xVals, yVals);
const b = this.highContrastMode ? this.bValues[colorIndex] : Math.floor((255 / numExperiments) * index);
const g = this.highContrastMode ? this.gValues[colorIndex] : Math.floor((255 / numExperiments) * index / 2);
const r = this.highContrastMode ? this.rValues[colorIndex] : 0;
const newChartLine = {
label: experiment[displayedLineType.rowKey] + displayedLineType.unit,
backgroundColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 0.5 + ')',
borderColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 1 + ')',
fill: false,
data: chartJSPoints,
pointStyle: this.alternatePointMode ? this.pointStyles[pointStylesIndex] : 'circle',
};
pointStylesIndex++;
colorIndex ++;
if (colorIndex >= this.rValues.length) {
colorIndex = 0;
}
if (pointStylesIndex >= this.pointStyles.length) {
pointStylesIndex = 0;
}
newData = [...newData, newChartLine];
});
return newData;
}
public displayedLineType(dependency: IXAxis, constant: IXAxis): IXAxis {
const displayed = this.DEPENDENCY_VALUES.filter(val => val !== dependency && val !== constant);
return displayed[0];
}
public mapXandYPoints(xVals, yVals) |
public orderPoints(array) {
return array.sort((a, b) => (a.x > b.x) ? 1 : ((b.x > a.x) ? -1 : 0));
}
private switchAxisScale(log: boolean, axis: string) {
if (!this.chart) { return; }
if (log) {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'logarithmic';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.autoSkip = true;
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value, index, arr) => {
if (index === arr.length - 1 || index === 0) {
return value;
} else if (Math.log10(value) % 1 === 0) {
return (value).toLocaleString();
} else {
return '';
}
};
} else {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'linear';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value) => value;
this.chart.config.options.scales.xAxes[0].ticks = this.tickOptions();
}
}
public exportPNG() {
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
return ctx.canvas.toDataURL('image/png');
}
public selectAll() {
// weird quirk of chartjs, visible lines are null hidden
this.updateLineVisibility(null);
}
public deselectAll() {
this.updateLineVisibility(true);
}
public updateLineVisibility(hidden) {
this.chart.data.datasets.forEach( (dataset: any) => {
if (dataset) {
const toUpdate = dataset._meta[Object.keys(dataset._meta)[0]];
if (toUpdate) {
toUpdate.hidden = hidden;
}
}
});
this.chart.update();
}
private drawOnCanvas(chartInstance) {
const chartCtx = chartInstance.chart.ctx;
chartCtx.fillStyle = 'white';
chartCtx.fillRect(0, 0, chartInstance.chart.width, chartInstance.chart.height);
}
private formatTopOfToolTip(item, data) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /\(\d\d°|\d\dK|\d\dT|\dT|\d°|\dK/;
const mainLabel = data.datasets[item.datasetIndex].label;
if (mainLabel.toString().endsWith('°')) {
if (reg.exec(this.xAxisLabel)[1] === 'T') {
return `${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}, ${mainLabel}`;
} else if (mainLabel.toString().endsWith('K')) {
if (reg.exec(this.xAxisLabel)[1] === '°') {
return `${mainLabel}, ${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}`;
} else if (mainLabel.toString().endsWith('T')) {
if (reg.exec(this.xAxisLabel)[1] === 'K') {
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
return `${reg2.exec(this.title)}, ${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
private formatBottomOfToolTip(tooltipItems) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /𝘐c\/𝘸|𝘐c|𝘯-value/;
let yUnits = '';
if (reg.exec(this.yAxisLabel) !== null) {
yUnits = reg.exec(this.yAxisLabel)[1];
}
const yLabel = tooltipItems.yLabel;
return `${reg2.exec(this.yAxisLabel)} = ${yLabel} ${yUnits}`;
}
}
| {
const xyPoints = xVals.map( (e, i) => [e, yVals[i]] ).map( (xs) => {
return { x: xs[0], y: xs[1] };
} );
return this.orderPoints(xyPoints);
} | identifier_body |
base-chart.component.ts | import { AfterViewInit, Component, HostListener, OnDestroy } from '@angular/core';
import * as Chart from 'chart.js';
import 'rxjs-compat/add/observable/of';
import {ChartDataSets} from 'chart.js';
import { VisState } from '../_ngrx/vis.state';
import { select, Store } from '@ngrx/store';
import { IXAxis, TEMP, FIELD, ANGLE, IYAxis, CRIT_CURR_WIDTH, CRIT_CURR, N_VALUE } from '../_shared/interfaces/axis';
import { takeUntil } from 'rxjs/operators';
import { Subject } from 'rxjs';
import { ActivatedRoute } from '@angular/router';
import {
RequestInitialGraphDataAction,
SetHighContrastAction,
SetXScaleAction,
SetYScaleAction,
ChangeConstantValueAction,
ChangeDependencyAction,
ChangeConstantAction,
SetYAxisAction,
SetGraphNotLoadedAction,
SetAlternatePointsAction,
SetLineTensionAction,
SetPointSizeAction
} from '../_ngrx/vis.actions';
@Component({
selector: 'vis-base-chart',
templateUrl: './base-chart.component.html',
styleUrls: ['./base-chart.component.scss']
})
export class BaseChartComponent implements AfterViewInit, OnDestroy {
public id: number;
public windowWidth: number;
public datasets: ChartDataSets[];
public title: string;
public xAxisLabel: string;
public yAxisLabel: string;
public logScaleY = false;
public logScaleX = false;
public highContrastMode = false;
public alternatePointMode = false;
public Yaxis: IYAxis;
public dependency: IXAxis;
private DEPENDENCY_VALUES = [TEMP, FIELD, ANGLE];
public lineTension = false;
public pointSize = 3;
public firstParam: string;
public secondParam: string;
public thirdParam: string;
public fourthParam: string;
public fifthParam: string
private rValues = [178, 44, 60, 203, 88, 84, 136, 96, 158, 166, 77, 108, 182, 116];
private gValues = [51, 50, 55, 91, 162, 73, 91, 116, 149, 87, 160, 131, 197, 200];
private bValues = [76, 63, 186, 200, 97, 118, 186, 115, 64, 107, 171, 199, 107, 138];
private pointStyles = [
'circle', 'rect', 'triangle', 'cross', 'crossRot', 'rectRot', 'star'
];
private chart: Chart;
private tickStyle = {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
};
private onDestroy: Subject<boolean> = new Subject<boolean>();
constructor(private _store: Store<VisState>, private _route: ActivatedRoute) {
this.id = this._route.snapshot.params['id'];
}
ngOnInit() {
this.firstParam = this._route.snapshot.queryParamMap.get('indepscale');
this.secondParam= this._route.snapshot.queryParamMap.get('depscale');
this.thirdParam= this._route.snapshot.queryParamMap.get('dependency');
this.fourthParam= this._route.snapshot.queryParamMap.get('constant');
this.fifthParam = this._route.snapshot.queryParamMap.get('yaxis');
}
ngOnDestroy() {
this.onDestroy.next(true);
this.onDestroy.unsubscribe();
}
@HostListener('window:resize')
onResize() {
this.windowWidth = window.innerWidth;
const currentPosition = this.chart.config.options.legend.position;
const newPosition = this.getLegendPosition();
if (currentPosition !== newPosition) {
this.updateChartWidth();
}
}
ngAfterViewInit(): void {
this.windowWidth = window.innerWidth;
this._store.pipe(select('vis'), takeUntil(this.onDestroy))
.subscribe((state: VisState) => {
const {graphTitle, dependency, yAxis, highContrastMode, logScaleX, logScaleY, alternatePointsMode, lineTension, pointSize} = state;
this.highContrastMode = highContrastMode;
this.alternatePointMode = alternatePointsMode;
this.datasets = this.mapExperimentsToChartData(state);
this.title = graphTitle;
this.Yaxis = yAxis;
this.xAxisLabel = dependency.axisName + ' ' + dependency.variable + ' ' + dependency.bracketedUnit;
this.yAxisLabel = yAxis.axisName + ' ' + yAxis.variable + ' ' + yAxis.bracketedUnit;
this.logScaleX = logScaleX;
this.logScaleY = logScaleY;
this.dependency = dependency;
this.lineTension = lineTension;
this.pointSize = pointSize;
this.updateChartAndMetaData();
});
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
this.chart = new Chart(ctx, {
type: 'line',
data: { datasets: this.datasets},
plugins: [{ beforeDraw: this.drawOnCanvas }],
options: {
responsive: true,
maintainAspectRatio: false,
title: { display: true, position: 'top', text: this.title, fontSize: 17},
legend: { position: this.getLegendPosition(), reverse: true,
labels: {
usePointStyle: true
}
},
layout: { padding: { top: 0 } },
hover: { mode: 'nearest' },
elements: {
line: {
tension: this.lineTensionBoolean()
},
point: {
radius: this.pointSize,
hoverRadius: this.pointSize + 1
}
},
tooltips: {
callbacks: {
title: ( () => ''),
beforeLabel: (item, data) => this.formatTopOfToolTip(item, data),
label: (tooltipItems) => this.formatBottomOfToolTip(tooltipItems),
}
},
scales: {
display: true,
yAxes: [{
type: 'linear',
ticks: this.tickStyle,
scaleLabel: { labelString: this.yAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
xAxes: [{
type: 'linear',
ticks: this.tickOptions(),
scaleLabel: { labelString: this.xAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
}
}
});
this.updateChartAndMetaData();
// Triggers default chart update if query parameters are present in url - will be updated to evaluate contents of paramMap array
// if((this.firstParam || this.secondParam) != null){
// this.updateChartDefaults();
// this.updateChartAndMetaData();
// }
this.updateChartDefaults();
}
public setCustomDependency(dep: IXAxis) {
this._store.dispatch(new ChangeDependencyAction(dep));
}
public setCustomConstant(constant: IXAxis) {
this._store.dispatch(new ChangeConstantAction(constant));
}
public setCustomYScale(yscale: boolean) {
// console.log("setYScale: " + !this.useLogScaleX)
this._store.dispatch(new SetYScaleAction(yscale));
}
public setCustomXScale(xscale: boolean) {
// console.log("setXScale: " + !this.useLogScaleX)
this._store.dispatch(new SetXScaleAction(xscale));
}
public setCustomYAxis(yaxis: IYAxis) {
this._store.dispatch(new SetYAxisAction(yaxis));
}
public updateChartDefaults(){
if(this.firstParam === "logarithmic"){
this.setCustomXScale(true);
}
else if(this.firstParam === "linear"){
this.setCustomXScale(false);
}
else{
console.log("invalid logScaleY arg")
}
if(this.secondParam === "logarithmic"){
this.setCustomYScale(true);
}
else if(this.secondParam === "linear"){
this.setCustomYScale(false);
}
else{
console.log("invalid logScaleX arg")
}
if(this.thirdParam === "temp"){
this.setCustomDependency(TEMP);
}
else if(this.thirdParam === "angle"){
this.setCustomDependency(ANGLE);
}
else if(this.thirdParam === "field"){
this.setCustomDependency(FIELD);
}
else{
console.log("invalid dependency arg")
}
if(this.fourthParam === "temp"){
this.setCustomConstant(TEMP);
}
else if(this.fourthParam === "angle"){
this.setCustomConstant(ANGLE);
}
else if(this.fourthParam === "field"){
this.setCustomConstant(FIELD);
}
else{
console.log("invalid constant arg")
}
if(this.fifthParam === "ic"){
this.setCustomYAxis(CRIT_CURR)
}
else if(this.fifthParam === "icw"){
this.setCustomYAxis(CRIT_CURR_WIDTH)
}
else if(this.fifthParam === "nvalue"){
this.setCustomYAxis(N_VALUE);
}
else{
console.log("invalid yaxis arg")
}
}
public | () {
return this.windowWidth >= 800 ? 'right' : 'bottom';
}
public updateChartAndMetaData() {
if (this.chart) {
this.switchAxisScale(this.logScaleY, 'y');
this.switchAxisScale(this.logScaleX, 'x');
this.chart.config.options.title.text = this.title;
this.chart.config.options.scales.xAxes[0].scaleLabel.labelString = this.xAxisLabel;
this.chart.config.options.scales.yAxes[0].scaleLabel.labelString = this.yAxisLabel;
this.chart.config.options.elements.line.tension = this.lineTensionBoolean();
this.chart.config.options.elements.point.radius = this.pointSize;
this.chart.config.options.elements.point.hoverRadius = this.pointSize + 1;
this.chart.config.options.legend.position = this.getLegendPosition();
this.updateChart();
}
}
public updateChart() {
if (this.chart) {
this.chart.data.datasets = this.datasets;
this.chart.update({
duration:0});
}
}
public updateChartWidth() {
this.chart.config.options.legend.position = this.getLegendPosition();
this.chart.update({
duration:0});
}
public tickOptions() {
if (this.dependency.raw === 'angle') {
return {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
stepSize: 30
};
} else {
return this.tickStyle;
}
}
public lineTensionBoolean() {
return this.lineTension ? 0 : 0.5;
}
public mapExperimentsToChartData(state: VisState) {
const { experiments, dependency, yAxis } = state;
console.log("mapEx state dependency: " + state.dependency.axisName);
console.log("mapEx state constant: " + state.constant.dependence)
const displayedLineType = this.displayedLineType(state.dependency, state.constant);
const numExperiments = experiments.length;
let newData = [];
let pointStylesIndex = 0;
let colorIndex = 0;
experiments.forEach((experiment, index) => {
const xVals = experiment.rows.map(point => point[dependency.raw]);
const yVals = experiment.rows.map(point => point[yAxis.rowKey]);
const chartJSPoints = this.mapXandYPoints(xVals, yVals);
const b = this.highContrastMode ? this.bValues[colorIndex] : Math.floor((255 / numExperiments) * index);
const g = this.highContrastMode ? this.gValues[colorIndex] : Math.floor((255 / numExperiments) * index / 2);
const r = this.highContrastMode ? this.rValues[colorIndex] : 0;
const newChartLine = {
label: experiment[displayedLineType.rowKey] + displayedLineType.unit,
backgroundColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 0.5 + ')',
borderColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 1 + ')',
fill: false,
data: chartJSPoints,
pointStyle: this.alternatePointMode ? this.pointStyles[pointStylesIndex] : 'circle',
};
pointStylesIndex++;
colorIndex ++;
if (colorIndex >= this.rValues.length) {
colorIndex = 0;
}
if (pointStylesIndex >= this.pointStyles.length) {
pointStylesIndex = 0;
}
newData = [...newData, newChartLine];
});
return newData;
}
public displayedLineType(dependency: IXAxis, constant: IXAxis): IXAxis {
const displayed = this.DEPENDENCY_VALUES.filter(val => val !== dependency && val !== constant);
return displayed[0];
}
public mapXandYPoints(xVals, yVals) {
const xyPoints = xVals.map( (e, i) => [e, yVals[i]] ).map( (xs) => {
return { x: xs[0], y: xs[1] };
} );
return this.orderPoints(xyPoints);
}
public orderPoints(array) {
return array.sort((a, b) => (a.x > b.x) ? 1 : ((b.x > a.x) ? -1 : 0));
}
private switchAxisScale(log: boolean, axis: string) {
if (!this.chart) { return; }
if (log) {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'logarithmic';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.autoSkip = true;
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value, index, arr) => {
if (index === arr.length - 1 || index === 0) {
return value;
} else if (Math.log10(value) % 1 === 0) {
return (value).toLocaleString();
} else {
return '';
}
};
} else {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'linear';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value) => value;
this.chart.config.options.scales.xAxes[0].ticks = this.tickOptions();
}
}
public exportPNG() {
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
return ctx.canvas.toDataURL('image/png');
}
public selectAll() {
// weird quirk of chartjs, visible lines are null hidden
this.updateLineVisibility(null);
}
public deselectAll() {
this.updateLineVisibility(true);
}
public updateLineVisibility(hidden) {
this.chart.data.datasets.forEach( (dataset: any) => {
if (dataset) {
const toUpdate = dataset._meta[Object.keys(dataset._meta)[0]];
if (toUpdate) {
toUpdate.hidden = hidden;
}
}
});
this.chart.update();
}
private drawOnCanvas(chartInstance) {
const chartCtx = chartInstance.chart.ctx;
chartCtx.fillStyle = 'white';
chartCtx.fillRect(0, 0, chartInstance.chart.width, chartInstance.chart.height);
}
private formatTopOfToolTip(item, data) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /\(\d\d°|\d\dK|\d\dT|\dT|\d°|\dK/;
const mainLabel = data.datasets[item.datasetIndex].label;
if (mainLabel.toString().endsWith('°')) {
if (reg.exec(this.xAxisLabel)[1] === 'T') {
return `${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}, ${mainLabel}`;
} else if (mainLabel.toString().endsWith('K')) {
if (reg.exec(this.xAxisLabel)[1] === '°') {
return `${mainLabel}, ${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}`;
} else if (mainLabel.toString().endsWith('T')) {
if (reg.exec(this.xAxisLabel)[1] === 'K') {
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
return `${reg2.exec(this.title)}, ${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
private formatBottomOfToolTip(tooltipItems) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /𝘐c\/𝘸|𝘐c|𝘯-value/;
let yUnits = '';
if (reg.exec(this.yAxisLabel) !== null) {
yUnits = reg.exec(this.yAxisLabel)[1];
}
const yLabel = tooltipItems.yLabel;
return `${reg2.exec(this.yAxisLabel)} = ${yLabel} ${yUnits}`;
}
}
| getLegendPosition | identifier_name |
base-chart.component.ts | import { AfterViewInit, Component, HostListener, OnDestroy } from '@angular/core';
import * as Chart from 'chart.js';
import 'rxjs-compat/add/observable/of';
import {ChartDataSets} from 'chart.js';
import { VisState } from '../_ngrx/vis.state';
import { select, Store } from '@ngrx/store';
import { IXAxis, TEMP, FIELD, ANGLE, IYAxis, CRIT_CURR_WIDTH, CRIT_CURR, N_VALUE } from '../_shared/interfaces/axis';
import { takeUntil } from 'rxjs/operators';
import { Subject } from 'rxjs';
import { ActivatedRoute } from '@angular/router';
import {
RequestInitialGraphDataAction,
SetHighContrastAction,
SetXScaleAction,
SetYScaleAction,
ChangeConstantValueAction,
ChangeDependencyAction,
ChangeConstantAction,
SetYAxisAction,
SetGraphNotLoadedAction,
SetAlternatePointsAction,
SetLineTensionAction,
SetPointSizeAction
} from '../_ngrx/vis.actions';
@Component({
selector: 'vis-base-chart',
templateUrl: './base-chart.component.html',
styleUrls: ['./base-chart.component.scss']
})
export class BaseChartComponent implements AfterViewInit, OnDestroy {
public id: number;
public windowWidth: number;
public datasets: ChartDataSets[];
public title: string;
public xAxisLabel: string;
public yAxisLabel: string;
public logScaleY = false;
public logScaleX = false;
public highContrastMode = false;
public alternatePointMode = false;
public Yaxis: IYAxis;
public dependency: IXAxis;
private DEPENDENCY_VALUES = [TEMP, FIELD, ANGLE];
public lineTension = false;
public pointSize = 3;
public firstParam: string;
public secondParam: string;
public thirdParam: string;
public fourthParam: string;
public fifthParam: string
private rValues = [178, 44, 60, 203, 88, 84, 136, 96, 158, 166, 77, 108, 182, 116];
private gValues = [51, 50, 55, 91, 162, 73, 91, 116, 149, 87, 160, 131, 197, 200];
private bValues = [76, 63, 186, 200, 97, 118, 186, 115, 64, 107, 171, 199, 107, 138];
private pointStyles = [
'circle', 'rect', 'triangle', 'cross', 'crossRot', 'rectRot', 'star'
];
private chart: Chart;
private tickStyle = {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
};
private onDestroy: Subject<boolean> = new Subject<boolean>();
constructor(private _store: Store<VisState>, private _route: ActivatedRoute) {
this.id = this._route.snapshot.params['id'];
}
ngOnInit() {
this.firstParam = this._route.snapshot.queryParamMap.get('indepscale');
this.secondParam= this._route.snapshot.queryParamMap.get('depscale');
this.thirdParam= this._route.snapshot.queryParamMap.get('dependency');
this.fourthParam= this._route.snapshot.queryParamMap.get('constant');
this.fifthParam = this._route.snapshot.queryParamMap.get('yaxis');
}
ngOnDestroy() {
this.onDestroy.next(true);
this.onDestroy.unsubscribe();
}
@HostListener('window:resize')
onResize() {
this.windowWidth = window.innerWidth;
const currentPosition = this.chart.config.options.legend.position;
const newPosition = this.getLegendPosition();
if (currentPosition !== newPosition) {
this.updateChartWidth();
}
}
ngAfterViewInit(): void {
this.windowWidth = window.innerWidth;
this._store.pipe(select('vis'), takeUntil(this.onDestroy))
.subscribe((state: VisState) => {
const {graphTitle, dependency, yAxis, highContrastMode, logScaleX, logScaleY, alternatePointsMode, lineTension, pointSize} = state;
this.highContrastMode = highContrastMode;
this.alternatePointMode = alternatePointsMode;
this.datasets = this.mapExperimentsToChartData(state);
this.title = graphTitle;
this.Yaxis = yAxis;
this.xAxisLabel = dependency.axisName + ' ' + dependency.variable + ' ' + dependency.bracketedUnit;
this.yAxisLabel = yAxis.axisName + ' ' + yAxis.variable + ' ' + yAxis.bracketedUnit;
this.logScaleX = logScaleX;
this.logScaleY = logScaleY;
this.dependency = dependency;
this.lineTension = lineTension;
this.pointSize = pointSize;
this.updateChartAndMetaData();
});
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
this.chart = new Chart(ctx, {
type: 'line',
data: { datasets: this.datasets},
plugins: [{ beforeDraw: this.drawOnCanvas }],
options: {
responsive: true,
maintainAspectRatio: false,
title: { display: true, position: 'top', text: this.title, fontSize: 17},
legend: { position: this.getLegendPosition(), reverse: true,
labels: {
usePointStyle: true
}
},
layout: { padding: { top: 0 } },
hover: { mode: 'nearest' },
elements: {
line: {
tension: this.lineTensionBoolean()
},
point: {
radius: this.pointSize,
hoverRadius: this.pointSize + 1
}
},
tooltips: {
callbacks: {
title: ( () => ''),
beforeLabel: (item, data) => this.formatTopOfToolTip(item, data),
label: (tooltipItems) => this.formatBottomOfToolTip(tooltipItems),
}
},
scales: {
display: true,
yAxes: [{
type: 'linear',
ticks: this.tickStyle,
scaleLabel: { labelString: this.yAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
xAxes: [{
type: 'linear',
ticks: this.tickOptions(),
scaleLabel: { labelString: this.xAxisLabel, display: true, fontSize: 18},
gridLines: { color: '#BEBEBE' }
}],
}
}
});
this.updateChartAndMetaData();
// Triggers default chart update if query parameters are present in url - will be updated to evaluate contents of paramMap array
// if((this.firstParam || this.secondParam) != null){
// this.updateChartDefaults();
// this.updateChartAndMetaData();
// }
this.updateChartDefaults();
}
public setCustomDependency(dep: IXAxis) {
this._store.dispatch(new ChangeDependencyAction(dep));
}
public setCustomConstant(constant: IXAxis) {
this._store.dispatch(new ChangeConstantAction(constant));
}
public setCustomYScale(yscale: boolean) {
// console.log("setYScale: " + !this.useLogScaleX)
this._store.dispatch(new SetYScaleAction(yscale));
}
public setCustomXScale(xscale: boolean) {
// console.log("setXScale: " + !this.useLogScaleX)
this._store.dispatch(new SetXScaleAction(xscale));
}
public setCustomYAxis(yaxis: IYAxis) {
this._store.dispatch(new SetYAxisAction(yaxis));
}
public updateChartDefaults(){
if(this.firstParam === "logarithmic"){
this.setCustomXScale(true);
}
else if(this.firstParam === "linear"){
this.setCustomXScale(false);
}
else{
console.log("invalid logScaleY arg")
}
if(this.secondParam === "logarithmic"){
this.setCustomYScale(true);
}
else if(this.secondParam === "linear"){
this.setCustomYScale(false);
}
else{
console.log("invalid logScaleX arg")
}
if(this.thirdParam === "temp"){
this.setCustomDependency(TEMP);
}
else if(this.thirdParam === "angle"){
this.setCustomDependency(ANGLE);
}
else if(this.thirdParam === "field"){
this.setCustomDependency(FIELD);
}
else{
console.log("invalid dependency arg")
}
if(this.fourthParam === "temp"){
this.setCustomConstant(TEMP);
}
else if(this.fourthParam === "angle"){
this.setCustomConstant(ANGLE);
}
else if(this.fourthParam === "field"){
this.setCustomConstant(FIELD);
}
else{
console.log("invalid constant arg")
}
if(this.fifthParam === "ic"){
this.setCustomYAxis(CRIT_CURR)
}
else if(this.fifthParam === "icw"){
this.setCustomYAxis(CRIT_CURR_WIDTH)
}
else if(this.fifthParam === "nvalue"){
this.setCustomYAxis(N_VALUE);
}
else{
console.log("invalid yaxis arg")
}
}
public getLegendPosition() {
return this.windowWidth >= 800 ? 'right' : 'bottom';
}
public updateChartAndMetaData() {
if (this.chart) |
}
public updateChart() {
if (this.chart) {
this.chart.data.datasets = this.datasets;
this.chart.update({
duration:0});
}
}
public updateChartWidth() {
this.chart.config.options.legend.position = this.getLegendPosition();
this.chart.update({
duration:0});
}
public tickOptions() {
if (this.dependency.raw === 'angle') {
return {
autoSkip: false,
fontSize: 16,
fontColor: '#383838',
stepSize: 30
};
} else {
return this.tickStyle;
}
}
public lineTensionBoolean() {
return this.lineTension ? 0 : 0.5;
}
public mapExperimentsToChartData(state: VisState) {
const { experiments, dependency, yAxis } = state;
console.log("mapEx state dependency: " + state.dependency.axisName);
console.log("mapEx state constant: " + state.constant.dependence)
const displayedLineType = this.displayedLineType(state.dependency, state.constant);
const numExperiments = experiments.length;
let newData = [];
let pointStylesIndex = 0;
let colorIndex = 0;
experiments.forEach((experiment, index) => {
const xVals = experiment.rows.map(point => point[dependency.raw]);
const yVals = experiment.rows.map(point => point[yAxis.rowKey]);
const chartJSPoints = this.mapXandYPoints(xVals, yVals);
const b = this.highContrastMode ? this.bValues[colorIndex] : Math.floor((255 / numExperiments) * index);
const g = this.highContrastMode ? this.gValues[colorIndex] : Math.floor((255 / numExperiments) * index / 2);
const r = this.highContrastMode ? this.rValues[colorIndex] : 0;
const newChartLine = {
label: experiment[displayedLineType.rowKey] + displayedLineType.unit,
backgroundColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 0.5 + ')',
borderColor: 'rgba(' + r + ',' + g + ',' + b + ',' + 1 + ')',
fill: false,
data: chartJSPoints,
pointStyle: this.alternatePointMode ? this.pointStyles[pointStylesIndex] : 'circle',
};
pointStylesIndex++;
colorIndex ++;
if (colorIndex >= this.rValues.length) {
colorIndex = 0;
}
if (pointStylesIndex >= this.pointStyles.length) {
pointStylesIndex = 0;
}
newData = [...newData, newChartLine];
});
return newData;
}
public displayedLineType(dependency: IXAxis, constant: IXAxis): IXAxis {
const displayed = this.DEPENDENCY_VALUES.filter(val => val !== dependency && val !== constant);
return displayed[0];
}
public mapXandYPoints(xVals, yVals) {
const xyPoints = xVals.map( (e, i) => [e, yVals[i]] ).map( (xs) => {
return { x: xs[0], y: xs[1] };
} );
return this.orderPoints(xyPoints);
}
public orderPoints(array) {
return array.sort((a, b) => (a.x > b.x) ? 1 : ((b.x > a.x) ? -1 : 0));
}
private switchAxisScale(log: boolean, axis: string) {
if (!this.chart) { return; }
if (log) {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'logarithmic';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.autoSkip = true;
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value, index, arr) => {
if (index === arr.length - 1 || index === 0) {
return value;
} else if (Math.log10(value) % 1 === 0) {
return (value).toLocaleString();
} else {
return '';
}
};
} else {
this.chart.config.options.scales[axis + 'Axes'][0].type = 'linear';
this.chart.config.options.scales[axis + 'Axes'][0].ticks.callback = (value) => value;
this.chart.config.options.scales.xAxes[0].ticks = this.tickOptions();
}
}
public exportPNG() {
const canvas: any = document.getElementById('chart-canvas');
const ctx = canvas.getContext('2d');
return ctx.canvas.toDataURL('image/png');
}
public selectAll() {
// weird quirk of chartjs, visible lines are null hidden
this.updateLineVisibility(null);
}
public deselectAll() {
this.updateLineVisibility(true);
}
public updateLineVisibility(hidden) {
this.chart.data.datasets.forEach( (dataset: any) => {
if (dataset) {
const toUpdate = dataset._meta[Object.keys(dataset._meta)[0]];
if (toUpdate) {
toUpdate.hidden = hidden;
}
}
});
this.chart.update();
}
private drawOnCanvas(chartInstance) {
const chartCtx = chartInstance.chart.ctx;
chartCtx.fillStyle = 'white';
chartCtx.fillRect(0, 0, chartInstance.chart.width, chartInstance.chart.height);
}
private formatTopOfToolTip(item, data) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /\(\d\d°|\d\dK|\d\dT|\dT|\d°|\dK/;
const mainLabel = data.datasets[item.datasetIndex].label;
if (mainLabel.toString().endsWith('°')) {
if (reg.exec(this.xAxisLabel)[1] === 'T') {
return `${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}, ${mainLabel}`;
} else if (mainLabel.toString().endsWith('K')) {
if (reg.exec(this.xAxisLabel)[1] === '°') {
return `${mainLabel}, ${reg2.exec(this.title)}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${reg2.exec(this.title)}`;
} else if (mainLabel.toString().endsWith('T')) {
if (reg.exec(this.xAxisLabel)[1] === 'K') {
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
return `${reg2.exec(this.title)}, ${mainLabel}, ${item.xLabel}${reg.exec(this.xAxisLabel)[1]}`;
}
return `${item.xLabel}${reg.exec(this.xAxisLabel)[1]}, ${mainLabel}, ${reg2.exec(this.title)}`;
}
private formatBottomOfToolTip(tooltipItems) {
const reg = /\((A\/cm|°|A|K|T)\)/;
const reg2 = /𝘐c\/𝘸|𝘐c|𝘯-value/;
let yUnits = '';
if (reg.exec(this.yAxisLabel) !== null) {
yUnits = reg.exec(this.yAxisLabel)[1];
}
const yLabel = tooltipItems.yLabel;
return `${reg2.exec(this.yAxisLabel)} = ${yLabel} ${yUnits}`;
}
}
| {
this.switchAxisScale(this.logScaleY, 'y');
this.switchAxisScale(this.logScaleX, 'x');
this.chart.config.options.title.text = this.title;
this.chart.config.options.scales.xAxes[0].scaleLabel.labelString = this.xAxisLabel;
this.chart.config.options.scales.yAxes[0].scaleLabel.labelString = this.yAxisLabel;
this.chart.config.options.elements.line.tension = this.lineTensionBoolean();
this.chart.config.options.elements.point.radius = this.pointSize;
this.chart.config.options.elements.point.hoverRadius = this.pointSize + 1;
this.chart.config.options.legend.position = this.getLegendPosition();
this.updateChart();
} | conditional_block |
api.py | # -*- coding: utf-8 -*-
"""
API documentation
https://api.crossref.org/swagger-ui/index.html
"""
"""
/funders/{funder_id} returns metadata for specified funder and its suborganizations
/prefixes/{owner_prefix} returns metadata for the DOI owner prefix
/members/{member_id} returns metadata for a CrossRef member
/types/{type_id} returns information about a metadata work type
/journals/{issn} returns information about a journal with the given ISSN
"""
"""
The works component can be appended to other resources.
resource description
/works/{doi} returns information about the specified CrossRef DOI
/funders/{funder_id}/works returns list of works associated with the specified funder_id
/types/{type_id}/works returns list of works of type type
/prefixes/{owner_prefix}/works returns list of works associated with specified owner_prefix
/members/{member_id}/works returns list of works associated with a CrossRef member (deposited by a CrossRef member)
/journals/{issn}/works returns a list of works in the given journal
from crossref.api import API
api = API()
"""
VERSION = '0.7'
import sys
import re
PY2 = int(sys.version[0]) == 2
#3rd party
#------------------------
import requests
#Local Imports
#------------------------
from .search import search_values as sv
from . import errors
from . import models
from . import utils
from .utils import get_truncated_display_string as td
from .utils import get_list_class_display as cld
display_class = utils.display_class
try:
from . import user_config
except:
raise Exception("User Config is required for running the API")
#/works filters
#https://github.com/CrossRef/rest-api-doc#filter-names
class API(object):
BASE_URL = 'https://api.crossref.org/'
"""
Attributes
----------
session : requests.Session
last_url :
last_response :
last_params :
work_types
"""
search_values = {}
#https://github.com/CrossRef/rest-api-doc#parameters
"""
search_options = {
'filter':'test',
'n_rows':'max # of results to return per request',
'n_random':'Return this # of random values',
'offset':'Return starting at a given position, max=10k',
'query':'Search terms, TODO: provide link to examples',
'sort_by':'A field by which to sort the results, see search_options.sort',
'order':'How to order the results, either "asc" (ascending) or "desc" (descending)',
'facet':'test',
'cursor':'test',
'select':'Fields '}
"""
search_keys = ['cursor',
'facet',
'filter',
'n_rows',
'n_random',
'offset',
'order',
'query',
'select',
'sort_by']
#search_examples = {}
def __init__(self,debug=False,session=None):
"""
Parameters
----------
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
def get_search_descriptions(key_name):
pass
def get_search_examples(key_name):
pass
@staticmethod
def get_search_options(key_name):
if key_name == 'cursor':
pass
elif key_name == 'facet':
pass
elif key_name == 'filter':
pass
elif key_name == 'n_rows':
return None
elif key_name == 'n_random':
return None
elif key_name == 'offset':
return None
elif key_name == 'order':
return sv.order
elif key_name == 'query':
return None
elif key_name == 'select':
print('select')
elif key_name == 'sort_by':
return sv.sort
pass
def _make_get_request(self,url,object_fh,params=None,return_type=None,extras=None):
"""
This function is the entry point for making requests.
"""
if params is None:
params = {}
if extras is None:
extras = {}
#Polite Pool Work
#---------------------------------------
#Example
#GroovyBib/1.1 (https://example.org/GroovyBib/; mailto:GroovyBib@example.org) BasedOnFunkyLib/1.4.
#It is unclear if we need to match this format
#This is good enough for now
#Eventually we might allow a user to describe their application
#version, and url
ua_str = 'st_crossref/%s (https://github.com/ScholarTools/crossref_api_python; mailto:%s)' % (VERSION,user_config.email)
headers = {'user-agent': ua_str}
#TODO Check params and # of results ...
#TODO: Implement rate limits ...
#The params get passed directly
r = self.session.get(url,params=params,headers=headers)
#Update limits
#---------------------
headers = r.headers
self.rate_limit = headers.get('X-Rate-Limit-Limit',50)
self.rate_limit_interval = int(headers.get('X-Rate-Limit-Interval','1s')[:-1])
#TODO: Implement ...https://konghq.com/blog/how-to-design-a-scalable-rate-limiting-algorithm/
#These are debug only and should not be used for anything else
#-------------------------------------------------------------
self.last_url = url
self.last_response = r
self.last_params = params
if r.status_code == 404:
#This typically happens when the DOI is invalid
#TODO: Make this a named exception
raise errors.RequestError(r.text)
json_data = r.json()
if json_data['status'] == 'failed':
self.last_error = json_data
raise errors.CrossrefAPIError(json_data['message'])
#Example error
"""
{'status': 'failed', 'message-type': 'validation-failure',
'message': [{'value': 'sample',
'message': 'This route does not support sample', 'type': 'parameter-not-allowed'}]}
"""
#TODO: return_type
if return_type == 'json' or object_fh is None:
return json_data
else:
return object_fh(json_data,self)
def _options_to_dict(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None):
#https://github.com/CrossRef/rest-api-doc#parameters
#I'm not thrilled about order ...
#
params = {
'cursor':cursor,
'facet':facet,
'filter':filter,
'offset':offset,
'order':order,
'query':query,
'rows':n_rows,
'select':select,
'sample':n_random,
'sort':sort_by}
#TODO: We have some more processing to do here
#=> filter processsing
#=> select
"""
DONE query query terms
DONE filter={filter_name}:{value} filter results by specific fields
DONE rows={#} results per per page
DONE offset={#} (mak 10k) result offset (user cursor for larger /works result sets)
DONE sample={#} (max 100) return random N results
DONE sort={#} sort results by a certain field
DONE order={#} set the sort order to asc or desc
DONE facet={#} enable facet information in responses
DONE cursor={#} deep page through /works result sets
"""
return params
def funders(self,filter=None,n_rows=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
"""
n_random not supported
select not supported
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=None,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'funders'
return self._make_get_request(url,models.FundersSearchResult,params,return_type)
def journals(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'journals'
return self._make_get_request(url,models.JournalSearchResult,params,return_type)
def licenses(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
"""
??? - This seems to return all licenses
Example Data
------------
.URL
.work-count
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'licenses'
#return self._make_search_request(url,models.LicenseSearchResult,options,_filter)
return self._make_get_request(url,models.LicenseSearchResult,params,return_type)
def members(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'members/'
return self._make_get_request(url,models.MembersSearchResult,params,return_type)
def works_doi(self,doi,return_type=None):
"""
Examples
---------
from crossref.api import API
api = API()
doi = '10.1016/j.urology.2023.01.012'
result = api.works_doi(doi)
"""
params = {}
url = self.BASE_URL + f'works/{doi}/'
return self._make_get_request(url,None,params,return_type)
def works(self,
filter=None,
n_rows=None,
n_random=None,
offset=None,
query=None,
sort_by=None,
order=None,
facet=None,
cursor=None,
select=None,
return_type=None):
"""
Parameters
----------
options : QueryOptions
_filter : Filter
Returns
-------
crossref.models.WorkList
Invalid options
---------------
sample
Example
-------
TODO: Do we get a 'next' link?
TODO: Make sure the model methods show in the display ...
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'works'
#return self._make_search_request(url,models.WorksSearchResult,options,_filter)
return self._make_get_request(url,models.WorksSearchResult,params,return_type)
def work_types(self):
"""
TODO: Cache this response
"""
if self._work_types is None:
url = self.BASE_URL + 'types'
self._work_types = self._make_get_request(url,models.TypesList,None,None)
return self._work_types
def doi_info(self,doi):
"""
Returns
-------
crossref.models.Work
If the DOI is not found the errors.InvalidDOI exception is raised.
Example
-------
import crossref
api = crossref.API()
m = api.doi_info('10.1109/TNSRE.2011.2163145')
"""
doi = _clean_doi(doi)
url = self.BASE_URL + 'works/' + doi
try:
return self._make_get_request(url,models.work_single)
except errors.RequestError:
#TODO: Check for 404
#last_response.status_code
#TODO: Do this only if debugging is enabled
if self.debug:
#TODO: Also report code
print("Error msg from server: " + self.last_response.text)
raise errors.InvalidDOI('Invalid DOI requested: ' + doi)
#return self._make_get_request(url,models.Work,kwargs)
def funder_info(self,funder_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'funders/' + funder_id
return self._make_get_request(url,models.funder_single)
def journal_info(self,journal_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'journals/' + journal_id
return self._make_get_request(url,models.journal_single)
def member_info(self,member_id):
"""
Example Data
------------
<class 'crossref.models.Member'>:
last_status_check_time: 1522803773023
primary_name: Wiley-Blackwell
counts: <dict> with 3 fields
breakdowns: <dict> with 1 fields
prefixes: <list> len 33
coverage: <dict> with 18 fields
prefix: <list> len 33
id: 311
tokens: ['wiley', 'blackwell']
flags: <dict> with 20 fields
location: 111 River Street Hoboken NJ 07...
names: <list> len 33
"""
url = self.BASE_URL + 'members/' + member_id
return self._make_get_request(url,models.member_single)
def prefix_info(self,prefix_id):
"""
Returns metadata for the DOI owner prefix
Returns
-------
crossref.models.Prefix
Implements
----------
/prefixes/{owner_prefix}
Example Data
------------
<class 'crossref.models.Prefix'>:
member: http://id.crossref.org/member/311
name: Wiley-Blackwell
prefix: http://id.crossref.org/prefix/10.1002
"""
url = self.BASE_URL + 'prefixes/' + prefix_id
return self._make_get_request(url,models.prefix_single)
def work_type_info(self,type_id):
"""
This doesn't seem to be all that useful, since it just returns
the subset of work_types()
Example
-------
api.work_type_info('journal')
e.g. {'id': 'journal', 'label': 'Journal'}
"""
url = self.BASE_URL + 'types/' + type_id
return self._make_get_request(url,models.pass_through)
def __repr__(self):
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
Returns
-------
TYPE
DESCRIPTION.
"""
pv = [
'debug',self.debug,
'session',cld(self.session),
'last_error',cld(self.last_error),
'rate_limit',self.rate_limit,
'rate_limit_interval',self.rate_limit_interval,
'retrieval',cld(self.retrieval),
'methods()','------------------------',
]
return utils.property_values_to_string(pv)
def _validate_config(user_config):
#1) Validate email
if hasattr(user_config,'email') and len(user_config.email) > 0:
pass
else:
raise Exception("Invalid email, email required in user_config")
_validate_config(user_config)
def _clean_doi(input_doi):
"""
This code was borrowed from:
https://github.com/Impactstory/total-impact-core/
"""
"""
Forms
-----
http://dx.doi.org/10.
doi:
10.
"""
#Hopefully we don't need to worry about this for now ...
#input_doi = remove_nonprinting_characters(input_doi)
input_doi = input_doi.lower()
if input_doi.startswith("http"):
match = re.match("^https*://(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif "doi.org" in input_doi:
match = re.match("^(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif input_doi.startswith("doi:"):
match = re.match("^doi:(10\..+)", input_doi)
doi = match.group(1)
elif input_doi.startswith("10."):
doi = input_doi
elif "10." in input_doi:
match = re.match(".*(10\.\d+.+)", input_doi, re.DOTALL)
doi = match.group(1)
else:
raise Exception('Unable to clean DOI: %s'%input_doi)
| return doi | random_line_split | |
api.py | # -*- coding: utf-8 -*-
"""
API documentation
https://api.crossref.org/swagger-ui/index.html
"""
"""
/funders/{funder_id} returns metadata for specified funder and its suborganizations
/prefixes/{owner_prefix} returns metadata for the DOI owner prefix
/members/{member_id} returns metadata for a CrossRef member
/types/{type_id} returns information about a metadata work type
/journals/{issn} returns information about a journal with the given ISSN
"""
"""
The works component can be appended to other resources.
resource description
/works/{doi} returns information about the specified CrossRef DOI
/funders/{funder_id}/works returns list of works associated with the specified funder_id
/types/{type_id}/works returns list of works of type type
/prefixes/{owner_prefix}/works returns list of works associated with specified owner_prefix
/members/{member_id}/works returns list of works associated with a CrossRef member (deposited by a CrossRef member)
/journals/{issn}/works returns a list of works in the given journal
from crossref.api import API
api = API()
"""
VERSION = '0.7'
import sys
import re
PY2 = int(sys.version[0]) == 2
#3rd party
#------------------------
import requests
#Local Imports
#------------------------
from .search import search_values as sv
from . import errors
from . import models
from . import utils
from .utils import get_truncated_display_string as td
from .utils import get_list_class_display as cld
display_class = utils.display_class
try:
from . import user_config
except:
raise Exception("User Config is required for running the API")
#/works filters
#https://github.com/CrossRef/rest-api-doc#filter-names
class API(object):
BASE_URL = 'https://api.crossref.org/'
"""
Attributes
----------
session : requests.Session
last_url :
last_response :
last_params :
work_types
"""
search_values = {}
#https://github.com/CrossRef/rest-api-doc#parameters
"""
search_options = {
'filter':'test',
'n_rows':'max # of results to return per request',
'n_random':'Return this # of random values',
'offset':'Return starting at a given position, max=10k',
'query':'Search terms, TODO: provide link to examples',
'sort_by':'A field by which to sort the results, see search_options.sort',
'order':'How to order the results, either "asc" (ascending) or "desc" (descending)',
'facet':'test',
'cursor':'test',
'select':'Fields '}
"""
search_keys = ['cursor',
'facet',
'filter',
'n_rows',
'n_random',
'offset',
'order',
'query',
'select',
'sort_by']
#search_examples = {}
def __init__(self,debug=False,session=None):
"""
Parameters
----------
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
def get_search_descriptions(key_name):
pass
def get_search_examples(key_name):
pass
@staticmethod
def get_search_options(key_name):
if key_name == 'cursor':
pass
elif key_name == 'facet':
|
elif key_name == 'filter':
pass
elif key_name == 'n_rows':
return None
elif key_name == 'n_random':
return None
elif key_name == 'offset':
return None
elif key_name == 'order':
return sv.order
elif key_name == 'query':
return None
elif key_name == 'select':
print('select')
elif key_name == 'sort_by':
return sv.sort
pass
def _make_get_request(self,url,object_fh,params=None,return_type=None,extras=None):
"""
This function is the entry point for making requests.
"""
if params is None:
params = {}
if extras is None:
extras = {}
#Polite Pool Work
#---------------------------------------
#Example
#GroovyBib/1.1 (https://example.org/GroovyBib/; mailto:GroovyBib@example.org) BasedOnFunkyLib/1.4.
#It is unclear if we need to match this format
#This is good enough for now
#Eventually we might allow a user to describe their application
#version, and url
ua_str = 'st_crossref/%s (https://github.com/ScholarTools/crossref_api_python; mailto:%s)' % (VERSION,user_config.email)
headers = {'user-agent': ua_str}
#TODO Check params and # of results ...
#TODO: Implement rate limits ...
#The params get passed directly
r = self.session.get(url,params=params,headers=headers)
#Update limits
#---------------------
headers = r.headers
self.rate_limit = headers.get('X-Rate-Limit-Limit',50)
self.rate_limit_interval = int(headers.get('X-Rate-Limit-Interval','1s')[:-1])
#TODO: Implement ...https://konghq.com/blog/how-to-design-a-scalable-rate-limiting-algorithm/
#These are debug only and should not be used for anything else
#-------------------------------------------------------------
self.last_url = url
self.last_response = r
self.last_params = params
if r.status_code == 404:
#This typically happens when the DOI is invalid
#TODO: Make this a named exception
raise errors.RequestError(r.text)
json_data = r.json()
if json_data['status'] == 'failed':
self.last_error = json_data
raise errors.CrossrefAPIError(json_data['message'])
#Example error
"""
{'status': 'failed', 'message-type': 'validation-failure',
'message': [{'value': 'sample',
'message': 'This route does not support sample', 'type': 'parameter-not-allowed'}]}
"""
#TODO: return_type
if return_type == 'json' or object_fh is None:
return json_data
else:
return object_fh(json_data,self)
def _options_to_dict(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None):
#https://github.com/CrossRef/rest-api-doc#parameters
#I'm not thrilled about order ...
#
params = {
'cursor':cursor,
'facet':facet,
'filter':filter,
'offset':offset,
'order':order,
'query':query,
'rows':n_rows,
'select':select,
'sample':n_random,
'sort':sort_by}
#TODO: We have some more processing to do here
#=> filter processsing
#=> select
"""
DONE query query terms
DONE filter={filter_name}:{value} filter results by specific fields
DONE rows={#} results per per page
DONE offset={#} (mak 10k) result offset (user cursor for larger /works result sets)
DONE sample={#} (max 100) return random N results
DONE sort={#} sort results by a certain field
DONE order={#} set the sort order to asc or desc
DONE facet={#} enable facet information in responses
DONE cursor={#} deep page through /works result sets
"""
return params
def funders(self,filter=None,n_rows=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
"""
n_random not supported
select not supported
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=None,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'funders'
return self._make_get_request(url,models.FundersSearchResult,params,return_type)
def journals(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'journals'
return self._make_get_request(url,models.JournalSearchResult,params,return_type)
def licenses(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
"""
??? - This seems to return all licenses
Example Data
------------
.URL
.work-count
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'licenses'
#return self._make_search_request(url,models.LicenseSearchResult,options,_filter)
return self._make_get_request(url,models.LicenseSearchResult,params,return_type)
def members(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'members/'
return self._make_get_request(url,models.MembersSearchResult,params,return_type)
def works_doi(self,doi,return_type=None):
"""
Examples
---------
from crossref.api import API
api = API()
doi = '10.1016/j.urology.2023.01.012'
result = api.works_doi(doi)
"""
params = {}
url = self.BASE_URL + f'works/{doi}/'
return self._make_get_request(url,None,params,return_type)
def works(self,
filter=None,
n_rows=None,
n_random=None,
offset=None,
query=None,
sort_by=None,
order=None,
facet=None,
cursor=None,
select=None,
return_type=None):
"""
Parameters
----------
options : QueryOptions
_filter : Filter
Returns
-------
crossref.models.WorkList
Invalid options
---------------
sample
Example
-------
TODO: Do we get a 'next' link?
TODO: Make sure the model methods show in the display ...
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'works'
#return self._make_search_request(url,models.WorksSearchResult,options,_filter)
return self._make_get_request(url,models.WorksSearchResult,params,return_type)
def work_types(self):
"""
TODO: Cache this response
"""
if self._work_types is None:
url = self.BASE_URL + 'types'
self._work_types = self._make_get_request(url,models.TypesList,None,None)
return self._work_types
def doi_info(self,doi):
"""
Returns
-------
crossref.models.Work
If the DOI is not found the errors.InvalidDOI exception is raised.
Example
-------
import crossref
api = crossref.API()
m = api.doi_info('10.1109/TNSRE.2011.2163145')
"""
doi = _clean_doi(doi)
url = self.BASE_URL + 'works/' + doi
try:
return self._make_get_request(url,models.work_single)
except errors.RequestError:
#TODO: Check for 404
#last_response.status_code
#TODO: Do this only if debugging is enabled
if self.debug:
#TODO: Also report code
print("Error msg from server: " + self.last_response.text)
raise errors.InvalidDOI('Invalid DOI requested: ' + doi)
#return self._make_get_request(url,models.Work,kwargs)
def funder_info(self,funder_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'funders/' + funder_id
return self._make_get_request(url,models.funder_single)
def journal_info(self,journal_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'journals/' + journal_id
return self._make_get_request(url,models.journal_single)
def member_info(self,member_id):
"""
Example Data
------------
<class 'crossref.models.Member'>:
last_status_check_time: 1522803773023
primary_name: Wiley-Blackwell
counts: <dict> with 3 fields
breakdowns: <dict> with 1 fields
prefixes: <list> len 33
coverage: <dict> with 18 fields
prefix: <list> len 33
id: 311
tokens: ['wiley', 'blackwell']
flags: <dict> with 20 fields
location: 111 River Street Hoboken NJ 07...
names: <list> len 33
"""
url = self.BASE_URL + 'members/' + member_id
return self._make_get_request(url,models.member_single)
def prefix_info(self,prefix_id):
"""
Returns metadata for the DOI owner prefix
Returns
-------
crossref.models.Prefix
Implements
----------
/prefixes/{owner_prefix}
Example Data
------------
<class 'crossref.models.Prefix'>:
member: http://id.crossref.org/member/311
name: Wiley-Blackwell
prefix: http://id.crossref.org/prefix/10.1002
"""
url = self.BASE_URL + 'prefixes/' + prefix_id
return self._make_get_request(url,models.prefix_single)
def work_type_info(self,type_id):
"""
This doesn't seem to be all that useful, since it just returns
the subset of work_types()
Example
-------
api.work_type_info('journal')
e.g. {'id': 'journal', 'label': 'Journal'}
"""
url = self.BASE_URL + 'types/' + type_id
return self._make_get_request(url,models.pass_through)
def __repr__(self):
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
Returns
-------
TYPE
DESCRIPTION.
"""
pv = [
'debug',self.debug,
'session',cld(self.session),
'last_error',cld(self.last_error),
'rate_limit',self.rate_limit,
'rate_limit_interval',self.rate_limit_interval,
'retrieval',cld(self.retrieval),
'methods()','------------------------',
]
return utils.property_values_to_string(pv)
def _validate_config(user_config):
#1) Validate email
if hasattr(user_config,'email') and len(user_config.email) > 0:
pass
else:
raise Exception("Invalid email, email required in user_config")
_validate_config(user_config)
def _clean_doi(input_doi):
"""
This code was borrowed from:
https://github.com/Impactstory/total-impact-core/
"""
"""
Forms
-----
http://dx.doi.org/10.
doi:
10.
"""
#Hopefully we don't need to worry about this for now ...
#input_doi = remove_nonprinting_characters(input_doi)
input_doi = input_doi.lower()
if input_doi.startswith("http"):
match = re.match("^https*://(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif "doi.org" in input_doi:
match = re.match("^(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif input_doi.startswith("doi:"):
match = re.match("^doi:(10\..+)", input_doi)
doi = match.group(1)
elif input_doi.startswith("10."):
doi = input_doi
elif "10." in input_doi:
match = re.match(".*(10\.\d+.+)", input_doi, re.DOTALL)
doi = match.group(1)
else:
raise Exception('Unable to clean DOI: %s'%input_doi)
return doi | pass | conditional_block |
api.py | # -*- coding: utf-8 -*-
"""
API documentation
https://api.crossref.org/swagger-ui/index.html
"""
"""
/funders/{funder_id} returns metadata for specified funder and its suborganizations
/prefixes/{owner_prefix} returns metadata for the DOI owner prefix
/members/{member_id} returns metadata for a CrossRef member
/types/{type_id} returns information about a metadata work type
/journals/{issn} returns information about a journal with the given ISSN
"""
"""
The works component can be appended to other resources.
resource description
/works/{doi} returns information about the specified CrossRef DOI
/funders/{funder_id}/works returns list of works associated with the specified funder_id
/types/{type_id}/works returns list of works of type type
/prefixes/{owner_prefix}/works returns list of works associated with specified owner_prefix
/members/{member_id}/works returns list of works associated with a CrossRef member (deposited by a CrossRef member)
/journals/{issn}/works returns a list of works in the given journal
from crossref.api import API
api = API()
"""
VERSION = '0.7'
import sys
import re
PY2 = int(sys.version[0]) == 2
#3rd party
#------------------------
import requests
#Local Imports
#------------------------
from .search import search_values as sv
from . import errors
from . import models
from . import utils
from .utils import get_truncated_display_string as td
from .utils import get_list_class_display as cld
display_class = utils.display_class
try:
from . import user_config
except:
raise Exception("User Config is required for running the API")
#/works filters
#https://github.com/CrossRef/rest-api-doc#filter-names
class API(object):
BASE_URL = 'https://api.crossref.org/'
"""
Attributes
----------
session : requests.Session
last_url :
last_response :
last_params :
work_types
"""
search_values = {}
#https://github.com/CrossRef/rest-api-doc#parameters
"""
search_options = {
'filter':'test',
'n_rows':'max # of results to return per request',
'n_random':'Return this # of random values',
'offset':'Return starting at a given position, max=10k',
'query':'Search terms, TODO: provide link to examples',
'sort_by':'A field by which to sort the results, see search_options.sort',
'order':'How to order the results, either "asc" (ascending) or "desc" (descending)',
'facet':'test',
'cursor':'test',
'select':'Fields '}
"""
search_keys = ['cursor',
'facet',
'filter',
'n_rows',
'n_random',
'offset',
'order',
'query',
'select',
'sort_by']
#search_examples = {}
def __init__(self,debug=False,session=None):
"""
Parameters
----------
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
def get_search_descriptions(key_name):
pass
def get_search_examples(key_name):
pass
@staticmethod
def get_search_options(key_name):
if key_name == 'cursor':
pass
elif key_name == 'facet':
pass
elif key_name == 'filter':
pass
elif key_name == 'n_rows':
return None
elif key_name == 'n_random':
return None
elif key_name == 'offset':
return None
elif key_name == 'order':
return sv.order
elif key_name == 'query':
return None
elif key_name == 'select':
print('select')
elif key_name == 'sort_by':
return sv.sort
pass
def _make_get_request(self,url,object_fh,params=None,return_type=None,extras=None):
|
def _options_to_dict(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None):
#https://github.com/CrossRef/rest-api-doc#parameters
#I'm not thrilled about order ...
#
params = {
'cursor':cursor,
'facet':facet,
'filter':filter,
'offset':offset,
'order':order,
'query':query,
'rows':n_rows,
'select':select,
'sample':n_random,
'sort':sort_by}
#TODO: We have some more processing to do here
#=> filter processsing
#=> select
"""
DONE query query terms
DONE filter={filter_name}:{value} filter results by specific fields
DONE rows={#} results per per page
DONE offset={#} (mak 10k) result offset (user cursor for larger /works result sets)
DONE sample={#} (max 100) return random N results
DONE sort={#} sort results by a certain field
DONE order={#} set the sort order to asc or desc
DONE facet={#} enable facet information in responses
DONE cursor={#} deep page through /works result sets
"""
return params
def funders(self,filter=None,n_rows=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
"""
n_random not supported
select not supported
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=None,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'funders'
return self._make_get_request(url,models.FundersSearchResult,params,return_type)
def journals(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'journals'
return self._make_get_request(url,models.JournalSearchResult,params,return_type)
def licenses(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
"""
??? - This seems to return all licenses
Example Data
------------
.URL
.work-count
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'licenses'
#return self._make_search_request(url,models.LicenseSearchResult,options,_filter)
return self._make_get_request(url,models.LicenseSearchResult,params,return_type)
def members(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'members/'
return self._make_get_request(url,models.MembersSearchResult,params,return_type)
def works_doi(self,doi,return_type=None):
"""
Examples
---------
from crossref.api import API
api = API()
doi = '10.1016/j.urology.2023.01.012'
result = api.works_doi(doi)
"""
params = {}
url = self.BASE_URL + f'works/{doi}/'
return self._make_get_request(url,None,params,return_type)
def works(self,
filter=None,
n_rows=None,
n_random=None,
offset=None,
query=None,
sort_by=None,
order=None,
facet=None,
cursor=None,
select=None,
return_type=None):
"""
Parameters
----------
options : QueryOptions
_filter : Filter
Returns
-------
crossref.models.WorkList
Invalid options
---------------
sample
Example
-------
TODO: Do we get a 'next' link?
TODO: Make sure the model methods show in the display ...
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'works'
#return self._make_search_request(url,models.WorksSearchResult,options,_filter)
return self._make_get_request(url,models.WorksSearchResult,params,return_type)
def work_types(self):
"""
TODO: Cache this response
"""
if self._work_types is None:
url = self.BASE_URL + 'types'
self._work_types = self._make_get_request(url,models.TypesList,None,None)
return self._work_types
def doi_info(self,doi):
"""
Returns
-------
crossref.models.Work
If the DOI is not found the errors.InvalidDOI exception is raised.
Example
-------
import crossref
api = crossref.API()
m = api.doi_info('10.1109/TNSRE.2011.2163145')
"""
doi = _clean_doi(doi)
url = self.BASE_URL + 'works/' + doi
try:
return self._make_get_request(url,models.work_single)
except errors.RequestError:
#TODO: Check for 404
#last_response.status_code
#TODO: Do this only if debugging is enabled
if self.debug:
#TODO: Also report code
print("Error msg from server: " + self.last_response.text)
raise errors.InvalidDOI('Invalid DOI requested: ' + doi)
#return self._make_get_request(url,models.Work,kwargs)
def funder_info(self,funder_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'funders/' + funder_id
return self._make_get_request(url,models.funder_single)
def journal_info(self,journal_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'journals/' + journal_id
return self._make_get_request(url,models.journal_single)
def member_info(self,member_id):
"""
Example Data
------------
<class 'crossref.models.Member'>:
last_status_check_time: 1522803773023
primary_name: Wiley-Blackwell
counts: <dict> with 3 fields
breakdowns: <dict> with 1 fields
prefixes: <list> len 33
coverage: <dict> with 18 fields
prefix: <list> len 33
id: 311
tokens: ['wiley', 'blackwell']
flags: <dict> with 20 fields
location: 111 River Street Hoboken NJ 07...
names: <list> len 33
"""
url = self.BASE_URL + 'members/' + member_id
return self._make_get_request(url,models.member_single)
def prefix_info(self,prefix_id):
"""
Returns metadata for the DOI owner prefix
Returns
-------
crossref.models.Prefix
Implements
----------
/prefixes/{owner_prefix}
Example Data
------------
<class 'crossref.models.Prefix'>:
member: http://id.crossref.org/member/311
name: Wiley-Blackwell
prefix: http://id.crossref.org/prefix/10.1002
"""
url = self.BASE_URL + 'prefixes/' + prefix_id
return self._make_get_request(url,models.prefix_single)
def work_type_info(self,type_id):
"""
This doesn't seem to be all that useful, since it just returns
the subset of work_types()
Example
-------
api.work_type_info('journal')
e.g. {'id': 'journal', 'label': 'Journal'}
"""
url = self.BASE_URL + 'types/' + type_id
return self._make_get_request(url,models.pass_through)
def __repr__(self):
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
Returns
-------
TYPE
DESCRIPTION.
"""
pv = [
'debug',self.debug,
'session',cld(self.session),
'last_error',cld(self.last_error),
'rate_limit',self.rate_limit,
'rate_limit_interval',self.rate_limit_interval,
'retrieval',cld(self.retrieval),
'methods()','------------------------',
]
return utils.property_values_to_string(pv)
def _validate_config(user_config):
#1) Validate email
if hasattr(user_config,'email') and len(user_config.email) > 0:
pass
else:
raise Exception("Invalid email, email required in user_config")
_validate_config(user_config)
def _clean_doi(input_doi):
"""
This code was borrowed from:
https://github.com/Impactstory/total-impact-core/
"""
"""
Forms
-----
http://dx.doi.org/10.
doi:
10.
"""
#Hopefully we don't need to worry about this for now ...
#input_doi = remove_nonprinting_characters(input_doi)
input_doi = input_doi.lower()
if input_doi.startswith("http"):
match = re.match("^https*://(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif "doi.org" in input_doi:
match = re.match("^(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif input_doi.startswith("doi:"):
match = re.match("^doi:(10\..+)", input_doi)
doi = match.group(1)
elif input_doi.startswith("10."):
doi = input_doi
elif "10." in input_doi:
match = re.match(".*(10\.\d+.+)", input_doi, re.DOTALL)
doi = match.group(1)
else:
raise Exception('Unable to clean DOI: %s'%input_doi)
return doi | """
This function is the entry point for making requests.
"""
if params is None:
params = {}
if extras is None:
extras = {}
#Polite Pool Work
#---------------------------------------
#Example
#GroovyBib/1.1 (https://example.org/GroovyBib/; mailto:GroovyBib@example.org) BasedOnFunkyLib/1.4.
#It is unclear if we need to match this format
#This is good enough for now
#Eventually we might allow a user to describe their application
#version, and url
ua_str = 'st_crossref/%s (https://github.com/ScholarTools/crossref_api_python; mailto:%s)' % (VERSION,user_config.email)
headers = {'user-agent': ua_str}
#TODO Check params and # of results ...
#TODO: Implement rate limits ...
#The params get passed directly
r = self.session.get(url,params=params,headers=headers)
#Update limits
#---------------------
headers = r.headers
self.rate_limit = headers.get('X-Rate-Limit-Limit',50)
self.rate_limit_interval = int(headers.get('X-Rate-Limit-Interval','1s')[:-1])
#TODO: Implement ...https://konghq.com/blog/how-to-design-a-scalable-rate-limiting-algorithm/
#These are debug only and should not be used for anything else
#-------------------------------------------------------------
self.last_url = url
self.last_response = r
self.last_params = params
if r.status_code == 404:
#This typically happens when the DOI is invalid
#TODO: Make this a named exception
raise errors.RequestError(r.text)
json_data = r.json()
if json_data['status'] == 'failed':
self.last_error = json_data
raise errors.CrossrefAPIError(json_data['message'])
#Example error
"""
{'status': 'failed', 'message-type': 'validation-failure',
'message': [{'value': 'sample',
'message': 'This route does not support sample', 'type': 'parameter-not-allowed'}]}
"""
#TODO: return_type
if return_type == 'json' or object_fh is None:
return json_data
else:
return object_fh(json_data,self) | identifier_body |
api.py | # -*- coding: utf-8 -*-
"""
API documentation
https://api.crossref.org/swagger-ui/index.html
"""
"""
/funders/{funder_id} returns metadata for specified funder and its suborganizations
/prefixes/{owner_prefix} returns metadata for the DOI owner prefix
/members/{member_id} returns metadata for a CrossRef member
/types/{type_id} returns information about a metadata work type
/journals/{issn} returns information about a journal with the given ISSN
"""
"""
The works component can be appended to other resources.
resource description
/works/{doi} returns information about the specified CrossRef DOI
/funders/{funder_id}/works returns list of works associated with the specified funder_id
/types/{type_id}/works returns list of works of type type
/prefixes/{owner_prefix}/works returns list of works associated with specified owner_prefix
/members/{member_id}/works returns list of works associated with a CrossRef member (deposited by a CrossRef member)
/journals/{issn}/works returns a list of works in the given journal
from crossref.api import API
api = API()
"""
VERSION = '0.7'
import sys
import re
PY2 = int(sys.version[0]) == 2
#3rd party
#------------------------
import requests
#Local Imports
#------------------------
from .search import search_values as sv
from . import errors
from . import models
from . import utils
from .utils import get_truncated_display_string as td
from .utils import get_list_class_display as cld
display_class = utils.display_class
try:
from . import user_config
except:
raise Exception("User Config is required for running the API")
#/works filters
#https://github.com/CrossRef/rest-api-doc#filter-names
class API(object):
BASE_URL = 'https://api.crossref.org/'
"""
Attributes
----------
session : requests.Session
last_url :
last_response :
last_params :
work_types
"""
search_values = {}
#https://github.com/CrossRef/rest-api-doc#parameters
"""
search_options = {
'filter':'test',
'n_rows':'max # of results to return per request',
'n_random':'Return this # of random values',
'offset':'Return starting at a given position, max=10k',
'query':'Search terms, TODO: provide link to examples',
'sort_by':'A field by which to sort the results, see search_options.sort',
'order':'How to order the results, either "asc" (ascending) or "desc" (descending)',
'facet':'test',
'cursor':'test',
'select':'Fields '}
"""
search_keys = ['cursor',
'facet',
'filter',
'n_rows',
'n_random',
'offset',
'order',
'query',
'select',
'sort_by']
#search_examples = {}
def __init__(self,debug=False,session=None):
"""
Parameters
----------
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
def get_search_descriptions(key_name):
pass
def get_search_examples(key_name):
pass
@staticmethod
def get_search_options(key_name):
if key_name == 'cursor':
pass
elif key_name == 'facet':
pass
elif key_name == 'filter':
pass
elif key_name == 'n_rows':
return None
elif key_name == 'n_random':
return None
elif key_name == 'offset':
return None
elif key_name == 'order':
return sv.order
elif key_name == 'query':
return None
elif key_name == 'select':
print('select')
elif key_name == 'sort_by':
return sv.sort
pass
def _make_get_request(self,url,object_fh,params=None,return_type=None,extras=None):
"""
This function is the entry point for making requests.
"""
if params is None:
params = {}
if extras is None:
extras = {}
#Polite Pool Work
#---------------------------------------
#Example
#GroovyBib/1.1 (https://example.org/GroovyBib/; mailto:GroovyBib@example.org) BasedOnFunkyLib/1.4.
#It is unclear if we need to match this format
#This is good enough for now
#Eventually we might allow a user to describe their application
#version, and url
ua_str = 'st_crossref/%s (https://github.com/ScholarTools/crossref_api_python; mailto:%s)' % (VERSION,user_config.email)
headers = {'user-agent': ua_str}
#TODO Check params and # of results ...
#TODO: Implement rate limits ...
#The params get passed directly
r = self.session.get(url,params=params,headers=headers)
#Update limits
#---------------------
headers = r.headers
self.rate_limit = headers.get('X-Rate-Limit-Limit',50)
self.rate_limit_interval = int(headers.get('X-Rate-Limit-Interval','1s')[:-1])
#TODO: Implement ...https://konghq.com/blog/how-to-design-a-scalable-rate-limiting-algorithm/
#These are debug only and should not be used for anything else
#-------------------------------------------------------------
self.last_url = url
self.last_response = r
self.last_params = params
if r.status_code == 404:
#This typically happens when the DOI is invalid
#TODO: Make this a named exception
raise errors.RequestError(r.text)
json_data = r.json()
if json_data['status'] == 'failed':
self.last_error = json_data
raise errors.CrossrefAPIError(json_data['message'])
#Example error
"""
{'status': 'failed', 'message-type': 'validation-failure',
'message': [{'value': 'sample',
'message': 'This route does not support sample', 'type': 'parameter-not-allowed'}]}
"""
#TODO: return_type
if return_type == 'json' or object_fh is None:
return json_data
else:
return object_fh(json_data,self)
def | (self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None):
#https://github.com/CrossRef/rest-api-doc#parameters
#I'm not thrilled about order ...
#
params = {
'cursor':cursor,
'facet':facet,
'filter':filter,
'offset':offset,
'order':order,
'query':query,
'rows':n_rows,
'select':select,
'sample':n_random,
'sort':sort_by}
#TODO: We have some more processing to do here
#=> filter processsing
#=> select
"""
DONE query query terms
DONE filter={filter_name}:{value} filter results by specific fields
DONE rows={#} results per per page
DONE offset={#} (mak 10k) result offset (user cursor for larger /works result sets)
DONE sample={#} (max 100) return random N results
DONE sort={#} sort results by a certain field
DONE order={#} set the sort order to asc or desc
DONE facet={#} enable facet information in responses
DONE cursor={#} deep page through /works result sets
"""
return params
def funders(self,filter=None,n_rows=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
"""
n_random not supported
select not supported
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=None,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'funders'
return self._make_get_request(url,models.FundersSearchResult,params,return_type)
def journals(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'journals'
return self._make_get_request(url,models.JournalSearchResult,params,return_type)
def licenses(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
"""
??? - This seems to return all licenses
Example Data
------------
.URL
.work-count
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=None)
url = self.BASE_URL + 'licenses'
#return self._make_search_request(url,models.LicenseSearchResult,options,_filter)
return self._make_get_request(url,models.LicenseSearchResult,params,return_type)
def members(self,filter=None,n_rows=None,n_random=None,
offset=None,query=None,sort_by=None,order=None,
facet=None,cursor=None,select=None,return_type=None):
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'members/'
return self._make_get_request(url,models.MembersSearchResult,params,return_type)
def works_doi(self,doi,return_type=None):
"""
Examples
---------
from crossref.api import API
api = API()
doi = '10.1016/j.urology.2023.01.012'
result = api.works_doi(doi)
"""
params = {}
url = self.BASE_URL + f'works/{doi}/'
return self._make_get_request(url,None,params,return_type)
def works(self,
filter=None,
n_rows=None,
n_random=None,
offset=None,
query=None,
sort_by=None,
order=None,
facet=None,
cursor=None,
select=None,
return_type=None):
"""
Parameters
----------
options : QueryOptions
_filter : Filter
Returns
-------
crossref.models.WorkList
Invalid options
---------------
sample
Example
-------
TODO: Do we get a 'next' link?
TODO: Make sure the model methods show in the display ...
"""
params = self._options_to_dict(filter=filter,n_rows=n_rows,
n_random=n_random,offset=offset,query=query,
sort_by=sort_by,order=order,facet=facet,cursor=cursor,
select=select)
url = self.BASE_URL + 'works'
#return self._make_search_request(url,models.WorksSearchResult,options,_filter)
return self._make_get_request(url,models.WorksSearchResult,params,return_type)
def work_types(self):
"""
TODO: Cache this response
"""
if self._work_types is None:
url = self.BASE_URL + 'types'
self._work_types = self._make_get_request(url,models.TypesList,None,None)
return self._work_types
def doi_info(self,doi):
"""
Returns
-------
crossref.models.Work
If the DOI is not found the errors.InvalidDOI exception is raised.
Example
-------
import crossref
api = crossref.API()
m = api.doi_info('10.1109/TNSRE.2011.2163145')
"""
doi = _clean_doi(doi)
url = self.BASE_URL + 'works/' + doi
try:
return self._make_get_request(url,models.work_single)
except errors.RequestError:
#TODO: Check for 404
#last_response.status_code
#TODO: Do this only if debugging is enabled
if self.debug:
#TODO: Also report code
print("Error msg from server: " + self.last_response.text)
raise errors.InvalidDOI('Invalid DOI requested: ' + doi)
#return self._make_get_request(url,models.Work,kwargs)
def funder_info(self,funder_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'funders/' + funder_id
return self._make_get_request(url,models.funder_single)
def journal_info(self,journal_id):
"""
Example Data
------------
"""
url = self.BASE_URL + 'journals/' + journal_id
return self._make_get_request(url,models.journal_single)
def member_info(self,member_id):
"""
Example Data
------------
<class 'crossref.models.Member'>:
last_status_check_time: 1522803773023
primary_name: Wiley-Blackwell
counts: <dict> with 3 fields
breakdowns: <dict> with 1 fields
prefixes: <list> len 33
coverage: <dict> with 18 fields
prefix: <list> len 33
id: 311
tokens: ['wiley', 'blackwell']
flags: <dict> with 20 fields
location: 111 River Street Hoboken NJ 07...
names: <list> len 33
"""
url = self.BASE_URL + 'members/' + member_id
return self._make_get_request(url,models.member_single)
def prefix_info(self,prefix_id):
"""
Returns metadata for the DOI owner prefix
Returns
-------
crossref.models.Prefix
Implements
----------
/prefixes/{owner_prefix}
Example Data
------------
<class 'crossref.models.Prefix'>:
member: http://id.crossref.org/member/311
name: Wiley-Blackwell
prefix: http://id.crossref.org/prefix/10.1002
"""
url = self.BASE_URL + 'prefixes/' + prefix_id
return self._make_get_request(url,models.prefix_single)
def work_type_info(self,type_id):
"""
This doesn't seem to be all that useful, since it just returns
the subset of work_types()
Example
-------
api.work_type_info('journal')
e.g. {'id': 'journal', 'label': 'Journal'}
"""
url = self.BASE_URL + 'types/' + type_id
return self._make_get_request(url,models.pass_through)
def __repr__(self):
"""
self.debug = debug
if session is None:
self.session = requests.Session()
else:
self.session = session
self._work_types = None
self.last_error = None
self.rate_limit = 50
self.rate_limit_interval = 1
Returns
-------
TYPE
DESCRIPTION.
"""
pv = [
'debug',self.debug,
'session',cld(self.session),
'last_error',cld(self.last_error),
'rate_limit',self.rate_limit,
'rate_limit_interval',self.rate_limit_interval,
'retrieval',cld(self.retrieval),
'methods()','------------------------',
]
return utils.property_values_to_string(pv)
def _validate_config(user_config):
#1) Validate email
if hasattr(user_config,'email') and len(user_config.email) > 0:
pass
else:
raise Exception("Invalid email, email required in user_config")
_validate_config(user_config)
def _clean_doi(input_doi):
"""
This code was borrowed from:
https://github.com/Impactstory/total-impact-core/
"""
"""
Forms
-----
http://dx.doi.org/10.
doi:
10.
"""
#Hopefully we don't need to worry about this for now ...
#input_doi = remove_nonprinting_characters(input_doi)
input_doi = input_doi.lower()
if input_doi.startswith("http"):
match = re.match("^https*://(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif "doi.org" in input_doi:
match = re.match("^(dx\.)*doi.org/(10\..+)", input_doi)
doi = match.group(2)
elif input_doi.startswith("doi:"):
match = re.match("^doi:(10\..+)", input_doi)
doi = match.group(1)
elif input_doi.startswith("10."):
doi = input_doi
elif "10." in input_doi:
match = re.match(".*(10\.\d+.+)", input_doi, re.DOTALL)
doi = match.group(1)
else:
raise Exception('Unable to clean DOI: %s'%input_doi)
return doi | _options_to_dict | identifier_name |
scaledobject_controller.go | package controllers
import (
"context"
"fmt"
"sync"
"github.com/go-logr/logr"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
kedav1alpha1 "github.com/kedacore/keda/api/v1alpha1"
kedacontrollerutil "github.com/kedacore/keda/controllers/util"
"github.com/kedacore/keda/pkg/scaling"
kedautil "github.com/kedacore/keda/pkg/util"
)
// +kubebuilder:rbac:groups=keda.sh,resources=scaledobjects;scaledobjects/finalizers;scaledobjects/status,verbs="*"
// +kubebuilder:rbac:groups=keda.sh,resources=triggerauthentications;triggerauthentications/status,verbs="*"
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs="*"
// +kubebuilder:rbac:groups="",resources=configmaps;configmaps/status;events,verbs="*"
// +kubebuilder:rbac:groups="",resources=pods;services;services;secrets;external,verbs=get;list;watch
// +kubebuilder:rbac:groups="*",resources="*/scale",verbs="*"
// +kubebuilder:rbac:groups="*",resources="*",verbs=get
// ScaledObjectReconciler reconciles a ScaledObject object
type ScaledObjectReconciler struct {
Log logr.Logger
Client client.Client
Scheme *runtime.Scheme
scaleClient *scale.ScalesGetter
restMapper meta.RESTMapper
scaledObjectsGenerations *sync.Map
scaleHandler scaling.ScaleHandler
kubeVersion kedautil.K8sVersion
}
// SetupWithManager initializes the ScaledObjectReconciler instance and starts a new controller managed by the passed Manager instance.
func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager) error {
// create Discovery clientset
clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
r.Log.Error(err, "Not able to create Discovery clientset")
return err
}
// Find out Kubernetes version
version, err := clientset.ServerVersion()
if err == nil {
r.kubeVersion = kedautil.NewK8sVersion(version)
r.Log.Info("Running on Kubernetes "+r.kubeVersion.PrettyVersion, "version", version)
} else {
r.Log.Error(err, "Not able to get Kubernetes version")
}
// Create Scale Client
scaleClient := initScaleClient(mgr, clientset)
r.scaleClient = &scaleClient
// Init the rest of ScaledObjectReconciler
r.restMapper = mgr.GetRESTMapper()
r.scaledObjectsGenerations = &sync.Map{}
r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), r.scaleClient, mgr.GetScheme())
// Start controller
return ctrl.NewControllerManagedBy(mgr).
// predicate.GenerationChangedPredicate{} ignore updates to ScaledObject Status
// (in this case metadata.Generation does not change)
// so reconcile loop is not started on Status updates
For(&kedav1alpha1.ScaledObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
Owns(&autoscalingv2beta2.HorizontalPodAutoscaler{}).
Complete(r)
}
func initScaleClient(mgr manager.Manager, clientset *discovery.DiscoveryClient) scale.ScalesGetter {
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(clientset)
return scale.New(
clientset.RESTClient(), mgr.GetRESTMapper(),
dynamic.LegacyAPIPathResolverFunc,
scaleKindResolver,
)
}
// Reconcile performs reconciliation on the identified ScaledObject resource based on the request information passed, returns the result and an error (if any).
func (r *ScaledObjectReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
reqLogger := r.Log.WithValues("ScaledObject.Namespace", req.Namespace, "ScaledObject.Name", req.Name)
// Fetch the ScaledObject instance
scaledObject := &kedav1alpha1.ScaledObject{}
err := r.Client.Get(context.TODO(), req.NamespacedName, scaledObject)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
// Error reading the object - requeue the request.
reqLogger.Error(err, "Failed to get ScaledObject")
return ctrl.Result{}, err
}
reqLogger.Info("Reconciling ScaledObject")
// Check if the ScaledObject instance is marked to be deleted, which is
// indicated by the deletion timestamp being set.
if scaledObject.GetDeletionTimestamp() != nil {
return ctrl.Result{}, r.finalizeScaledObject(reqLogger, scaledObject)
}
// ensure finalizer is set on this CR
if err := r.ensureFinalizer(reqLogger, scaledObject); err != nil {
return ctrl.Result{}, err
}
// ensure Status Conditions are initialized
if !scaledObject.Status.Conditions.AreInitialized() {
conditions := kedav1alpha1.GetInitializedConditions()
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, conditions)
}
// reconcile ScaledObject and set status appropriately
msg, err := r.reconcileScaledObject(reqLogger, scaledObject)
conditions := scaledObject.Status.Conditions.DeepCopy()
if err != nil {
reqLogger.Error(err, msg)
conditions.SetReadyCondition(metav1.ConditionFalse, "ScaledObjectCheckFailed", msg)
conditions.SetActiveCondition(metav1.ConditionUnknown, "UnkownState", "ScaledObject check failed")
} else {
reqLogger.V(1).Info(msg)
conditions.SetReadyCondition(metav1.ConditionTrue, "ScaledObjectReady", msg)
}
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, &conditions)
return ctrl.Result{}, err
}
// reconcileScaledObject implements reconciler logic for ScaleObject
func (r *ScaledObjectReconciler) reconcileScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (string, error) {
// Check scale target Name is specified
if scaledObject.Spec.ScaleTargetRef.Name == "" {
err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing")
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Check the label needed for Metrics servers is present on ScaledObject
err := r.ensureScaledObjectLabel(logger, scaledObject)
if err != nil {
return "Failed to update ScaledObject with scaledObjectName label", err
}
// Check if resource targeted for scaling exists and exposes /scale subresource
gvkr, err := r.checkTargetResourceIsScalable(logger, scaledObject)
if err != nil {
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Create a new HPA or update existing one according to ScaledObject
newHPACreated, err := r.ensureHPAForScaledObjectExists(logger, scaledObject, &gvkr)
if err != nil {
return "Failed to ensure HPA is correctly created for ScaledObject", err
}
scaleObjectSpecChanged := false
if !newHPACreated {
// Lets Check whether ScaledObject generation was changed, ie. there were changes in ScaledObject.Spec
// if it was changed we should start a new ScaleLoop
// (we can omit this check if a new HPA was created, which fires new ScaleLoop anyway)
scaleObjectSpecChanged, err = r.scaledObjectGenerationChanged(logger, scaledObject)
if err != nil {
return "Failed to check whether ScaledObject's Generation was changed", err
}
}
// Notify ScaleHandler if a new HPA was created or if ScaledObject was updated
if newHPACreated || scaleObjectSpecChanged {
if r.requestScaleLoop(logger, scaledObject) != nil {
return "Failed to start a new scale loop with scaling logic", err
}
logger.Info("Initializing Scaling logic according to ScaledObject Specification")
}
return "ScaledObject is defined correctly and is ready for scaling", nil | const labelScaledObjectName = "scaledObjectName"
if scaledObject.Labels == nil {
scaledObject.Labels = map[string]string{labelScaledObjectName: scaledObject.Name}
} else {
value, found := scaledObject.Labels[labelScaledObjectName]
if found && value == scaledObject.Name {
return nil
}
scaledObject.Labels[labelScaledObjectName] = scaledObject.Name
}
logger.V(1).Info("Adding scaledObjectName label on ScaledObject", "value", scaledObject.Name)
return r.Client.Update(context.TODO(), scaledObject)
}
// checkTargetResourceIsScalable checks if resource targeted for scaling exists and exposes /scale subresource
func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {
gvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)
if err != nil {
logger.Error(err, "Failed to parse Group, Version, Kind, Resource", "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind)
return gvkr, err
}
gvkString := gvkr.GVKString()
logger.V(1).Info("Parsed Group, Version, Kind, Resource", "GVK", gvkString, "Resource", gvkr.Resource)
// let's try to detect /scale subresource
scale, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
if errScale != nil {
// not able to get /scale subresource -> let's check if the resource even exist in the cluster
unstruct := &unstructured.Unstructured{}
unstruct.SetGroupVersionKind(gvkr.GroupVersionKind())
if err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {
// resource doesn't exist
logger.Error(err, "Target resource doesn't exist", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, err
}
// resource exist but doesn't expose /scale subresource
logger.Error(errScale, "Target resource doesn't expose /scale subresource", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, errScale
}
// if it is not already present in ScaledObject Status:
// - store discovered GVK and GVKR
// - store original scaleTarget's replica count (before scaling with KEDA)
if scaledObject.Status.ScaleTargetKind != gvkString || scaledObject.Status.OriginalReplicaCount == nil {
status := scaledObject.Status.DeepCopy()
if scaledObject.Status.ScaleTargetKind != gvkString {
status.ScaleTargetKind = gvkString
status.ScaleTargetGVKR = &gvkr
}
if scaledObject.Status.OriginalReplicaCount == nil {
status.OriginalReplicaCount = &scale.Spec.Replicas
}
if err := kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status); err != nil {
return gvkr, err
}
logger.Info("Detected resource targeted for scaling", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
}
return gvkr, nil
}
// ensureHPAForScaledObjectExists ensures that in cluster exist up-to-date HPA for specified ScaledObject, returns true if a new HPA was created
func (r *ScaledObjectReconciler) ensureHPAForScaledObjectExists(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) (bool, error) {
hpaName := getHPAName(scaledObject)
foundHpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
// Check if HPA for this ScaledObject already exists
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: hpaName, Namespace: scaledObject.Namespace}, foundHpa)
if err != nil && errors.IsNotFound(err) {
// HPA wasn't found -> let's create a new one
err = r.createAndDeployNewHPA(logger, scaledObject, gvkr)
if err != nil {
return false, err
}
// check if scaledObject.spec.behavior was defined, because it is supported only on k8s >= 1.18
r.checkMinK8sVersionforHPABehavior(logger, scaledObject)
// new HPA created successfully -> notify Reconcile function so it could fire a new ScaleLoop
return true, nil
} else if err != nil {
logger.Error(err, "Failed to get HPA from cluster")
return false, err
}
// HPA was found -> let's check if we need to update it
err = r.updateHPAIfNeeded(logger, scaledObject, foundHpa, gvkr)
if err != nil {
logger.Error(err, "Failed to check HPA for possible update")
return false, err
}
return false, nil
}
// startScaleLoop starts ScaleLoop handler for the respective ScaledObject
func (r *ScaledObjectReconciler) requestScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
logger.V(1).Info("Notify scaleHandler of an update in scaledObject")
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err = r.scaleHandler.HandleScalableObject(scaledObject); err != nil {
return err
}
// store ScaledObject's current Generation
r.scaledObjectsGenerations.Store(key, scaledObject.Generation)
return nil
}
// stopScaleLoop stops ScaleLoop handler for the respective ScaleObject
func (r *ScaledObjectReconciler) stopScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err := r.scaleHandler.DeleteScalableObject(scaledObject); err != nil {
return err
}
// delete ScaledObject's current Generation
r.scaledObjectsGenerations.Delete(key)
return nil
}
// scaledObjectGenerationChanged returns true if ScaledObject's Generation was changed, ie. ScaledObject.Spec was changed
func (r *ScaledObjectReconciler) scaledObjectGenerationChanged(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (bool, error) {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return true, err
}
value, loaded := r.scaledObjectsGenerations.Load(key)
if loaded {
generation := value.(int64)
if generation == scaledObject.Generation {
return false, nil
}
}
return true, nil
} | }
// ensureScaledObjectLabel ensures that scaledObjectName=<scaledObject.Name> label exist in the ScaledObject
// This is how the MetricsAdapter will know which ScaledObject a metric is for when the HPA queries it.
func (r *ScaledObjectReconciler) ensureScaledObjectLabel(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { | random_line_split |
scaledobject_controller.go | package controllers
import (
"context"
"fmt"
"sync"
"github.com/go-logr/logr"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
kedav1alpha1 "github.com/kedacore/keda/api/v1alpha1"
kedacontrollerutil "github.com/kedacore/keda/controllers/util"
"github.com/kedacore/keda/pkg/scaling"
kedautil "github.com/kedacore/keda/pkg/util"
)
// +kubebuilder:rbac:groups=keda.sh,resources=scaledobjects;scaledobjects/finalizers;scaledobjects/status,verbs="*"
// +kubebuilder:rbac:groups=keda.sh,resources=triggerauthentications;triggerauthentications/status,verbs="*"
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs="*"
// +kubebuilder:rbac:groups="",resources=configmaps;configmaps/status;events,verbs="*"
// +kubebuilder:rbac:groups="",resources=pods;services;services;secrets;external,verbs=get;list;watch
// +kubebuilder:rbac:groups="*",resources="*/scale",verbs="*"
// +kubebuilder:rbac:groups="*",resources="*",verbs=get
// ScaledObjectReconciler reconciles a ScaledObject object
type ScaledObjectReconciler struct {
Log logr.Logger
Client client.Client
Scheme *runtime.Scheme
scaleClient *scale.ScalesGetter
restMapper meta.RESTMapper
scaledObjectsGenerations *sync.Map
scaleHandler scaling.ScaleHandler
kubeVersion kedautil.K8sVersion
}
// SetupWithManager initializes the ScaledObjectReconciler instance and starts a new controller managed by the passed Manager instance.
func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager) error {
// create Discovery clientset
clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
r.Log.Error(err, "Not able to create Discovery clientset")
return err
}
// Find out Kubernetes version
version, err := clientset.ServerVersion()
if err == nil {
r.kubeVersion = kedautil.NewK8sVersion(version)
r.Log.Info("Running on Kubernetes "+r.kubeVersion.PrettyVersion, "version", version)
} else {
r.Log.Error(err, "Not able to get Kubernetes version")
}
// Create Scale Client
scaleClient := initScaleClient(mgr, clientset)
r.scaleClient = &scaleClient
// Init the rest of ScaledObjectReconciler
r.restMapper = mgr.GetRESTMapper()
r.scaledObjectsGenerations = &sync.Map{}
r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), r.scaleClient, mgr.GetScheme())
// Start controller
return ctrl.NewControllerManagedBy(mgr).
// predicate.GenerationChangedPredicate{} ignore updates to ScaledObject Status
// (in this case metadata.Generation does not change)
// so reconcile loop is not started on Status updates
For(&kedav1alpha1.ScaledObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
Owns(&autoscalingv2beta2.HorizontalPodAutoscaler{}).
Complete(r)
}
func initScaleClient(mgr manager.Manager, clientset *discovery.DiscoveryClient) scale.ScalesGetter {
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(clientset)
return scale.New(
clientset.RESTClient(), mgr.GetRESTMapper(),
dynamic.LegacyAPIPathResolverFunc,
scaleKindResolver,
)
}
// Reconcile performs reconciliation on the identified ScaledObject resource based on the request information passed, returns the result and an error (if any).
func (r *ScaledObjectReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
reqLogger := r.Log.WithValues("ScaledObject.Namespace", req.Namespace, "ScaledObject.Name", req.Name)
// Fetch the ScaledObject instance
scaledObject := &kedav1alpha1.ScaledObject{}
err := r.Client.Get(context.TODO(), req.NamespacedName, scaledObject)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
// Error reading the object - requeue the request.
reqLogger.Error(err, "Failed to get ScaledObject")
return ctrl.Result{}, err
}
reqLogger.Info("Reconciling ScaledObject")
// Check if the ScaledObject instance is marked to be deleted, which is
// indicated by the deletion timestamp being set.
if scaledObject.GetDeletionTimestamp() != nil {
return ctrl.Result{}, r.finalizeScaledObject(reqLogger, scaledObject)
}
// ensure finalizer is set on this CR
if err := r.ensureFinalizer(reqLogger, scaledObject); err != nil {
return ctrl.Result{}, err
}
// ensure Status Conditions are initialized
if !scaledObject.Status.Conditions.AreInitialized() {
conditions := kedav1alpha1.GetInitializedConditions()
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, conditions)
}
// reconcile ScaledObject and set status appropriately
msg, err := r.reconcileScaledObject(reqLogger, scaledObject)
conditions := scaledObject.Status.Conditions.DeepCopy()
if err != nil {
reqLogger.Error(err, msg)
conditions.SetReadyCondition(metav1.ConditionFalse, "ScaledObjectCheckFailed", msg)
conditions.SetActiveCondition(metav1.ConditionUnknown, "UnkownState", "ScaledObject check failed")
} else {
reqLogger.V(1).Info(msg)
conditions.SetReadyCondition(metav1.ConditionTrue, "ScaledObjectReady", msg)
}
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, &conditions)
return ctrl.Result{}, err
}
// reconcileScaledObject implements reconciler logic for ScaleObject
func (r *ScaledObjectReconciler) reconcileScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (string, error) {
// Check scale target Name is specified
if scaledObject.Spec.ScaleTargetRef.Name == "" {
err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing")
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Check the label needed for Metrics servers is present on ScaledObject
err := r.ensureScaledObjectLabel(logger, scaledObject)
if err != nil {
return "Failed to update ScaledObject with scaledObjectName label", err
}
// Check if resource targeted for scaling exists and exposes /scale subresource
gvkr, err := r.checkTargetResourceIsScalable(logger, scaledObject)
if err != nil {
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Create a new HPA or update existing one according to ScaledObject
newHPACreated, err := r.ensureHPAForScaledObjectExists(logger, scaledObject, &gvkr)
if err != nil {
return "Failed to ensure HPA is correctly created for ScaledObject", err
}
scaleObjectSpecChanged := false
if !newHPACreated {
// Lets Check whether ScaledObject generation was changed, ie. there were changes in ScaledObject.Spec
// if it was changed we should start a new ScaleLoop
// (we can omit this check if a new HPA was created, which fires new ScaleLoop anyway)
scaleObjectSpecChanged, err = r.scaledObjectGenerationChanged(logger, scaledObject)
if err != nil {
return "Failed to check whether ScaledObject's Generation was changed", err
}
}
// Notify ScaleHandler if a new HPA was created or if ScaledObject was updated
if newHPACreated || scaleObjectSpecChanged {
if r.requestScaleLoop(logger, scaledObject) != nil {
return "Failed to start a new scale loop with scaling logic", err
}
logger.Info("Initializing Scaling logic according to ScaledObject Specification")
}
return "ScaledObject is defined correctly and is ready for scaling", nil
}
// ensureScaledObjectLabel ensures that scaledObjectName=<scaledObject.Name> label exist in the ScaledObject
// This is how the MetricsAdapter will know which ScaledObject a metric is for when the HPA queries it.
func (r *ScaledObjectReconciler) ensureScaledObjectLabel(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
const labelScaledObjectName = "scaledObjectName"
if scaledObject.Labels == nil {
scaledObject.Labels = map[string]string{labelScaledObjectName: scaledObject.Name}
} else {
value, found := scaledObject.Labels[labelScaledObjectName]
if found && value == scaledObject.Name {
return nil
}
scaledObject.Labels[labelScaledObjectName] = scaledObject.Name
}
logger.V(1).Info("Adding scaledObjectName label on ScaledObject", "value", scaledObject.Name)
return r.Client.Update(context.TODO(), scaledObject)
}
// checkTargetResourceIsScalable checks if resource targeted for scaling exists and exposes /scale subresource
func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {
gvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)
if err != nil {
logger.Error(err, "Failed to parse Group, Version, Kind, Resource", "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind)
return gvkr, err
}
gvkString := gvkr.GVKString()
logger.V(1).Info("Parsed Group, Version, Kind, Resource", "GVK", gvkString, "Resource", gvkr.Resource)
// let's try to detect /scale subresource
scale, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
if errScale != nil {
// not able to get /scale subresource -> let's check if the resource even exist in the cluster
unstruct := &unstructured.Unstructured{}
unstruct.SetGroupVersionKind(gvkr.GroupVersionKind())
if err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {
// resource doesn't exist
logger.Error(err, "Target resource doesn't exist", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, err
}
// resource exist but doesn't expose /scale subresource
logger.Error(errScale, "Target resource doesn't expose /scale subresource", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, errScale
}
// if it is not already present in ScaledObject Status:
// - store discovered GVK and GVKR
// - store original scaleTarget's replica count (before scaling with KEDA)
if scaledObject.Status.ScaleTargetKind != gvkString || scaledObject.Status.OriginalReplicaCount == nil {
status := scaledObject.Status.DeepCopy()
if scaledObject.Status.ScaleTargetKind != gvkString {
status.ScaleTargetKind = gvkString
status.ScaleTargetGVKR = &gvkr
}
if scaledObject.Status.OriginalReplicaCount == nil |
if err := kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status); err != nil {
return gvkr, err
}
logger.Info("Detected resource targeted for scaling", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
}
return gvkr, nil
}
// ensureHPAForScaledObjectExists ensures that in cluster exist up-to-date HPA for specified ScaledObject, returns true if a new HPA was created
func (r *ScaledObjectReconciler) ensureHPAForScaledObjectExists(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) (bool, error) {
hpaName := getHPAName(scaledObject)
foundHpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
// Check if HPA for this ScaledObject already exists
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: hpaName, Namespace: scaledObject.Namespace}, foundHpa)
if err != nil && errors.IsNotFound(err) {
// HPA wasn't found -> let's create a new one
err = r.createAndDeployNewHPA(logger, scaledObject, gvkr)
if err != nil {
return false, err
}
// check if scaledObject.spec.behavior was defined, because it is supported only on k8s >= 1.18
r.checkMinK8sVersionforHPABehavior(logger, scaledObject)
// new HPA created successfully -> notify Reconcile function so it could fire a new ScaleLoop
return true, nil
} else if err != nil {
logger.Error(err, "Failed to get HPA from cluster")
return false, err
}
// HPA was found -> let's check if we need to update it
err = r.updateHPAIfNeeded(logger, scaledObject, foundHpa, gvkr)
if err != nil {
logger.Error(err, "Failed to check HPA for possible update")
return false, err
}
return false, nil
}
// startScaleLoop starts ScaleLoop handler for the respective ScaledObject
func (r *ScaledObjectReconciler) requestScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
logger.V(1).Info("Notify scaleHandler of an update in scaledObject")
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err = r.scaleHandler.HandleScalableObject(scaledObject); err != nil {
return err
}
// store ScaledObject's current Generation
r.scaledObjectsGenerations.Store(key, scaledObject.Generation)
return nil
}
// stopScaleLoop stops ScaleLoop handler for the respective ScaleObject
func (r *ScaledObjectReconciler) stopScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err := r.scaleHandler.DeleteScalableObject(scaledObject); err != nil {
return err
}
// delete ScaledObject's current Generation
r.scaledObjectsGenerations.Delete(key)
return nil
}
// scaledObjectGenerationChanged returns true if ScaledObject's Generation was changed, ie. ScaledObject.Spec was changed
func (r *ScaledObjectReconciler) scaledObjectGenerationChanged(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (bool, error) {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return true, err
}
value, loaded := r.scaledObjectsGenerations.Load(key)
if loaded {
generation := value.(int64)
if generation == scaledObject.Generation {
return false, nil
}
}
return true, nil
}
| {
status.OriginalReplicaCount = &scale.Spec.Replicas
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.