file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
MessageSigner.go | // Package messaging for signing and encryption of messages
package messaging
import (
"crypto/ecdsa"
"crypto/rand"
"crypto/sha256"
"encoding/asn1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"reflect"
"github.com/iotdomain/iotdomain-go/types"
"github.com/sirupsen/logrus"
"gopkg.in/square/go-jose.v2"
)
// MessageSigner for signing and verifying of signed and encrypted messages
type MessageSigner struct {
// GetPublicKey when available is used in mess to verify signature
GetPublicKey func(address string) *ecdsa.PublicKey // must be a variable
messenger IMessenger
signMessages bool // flag, sign outgoing messages. Default is true. Disable for testing
privateKey *ecdsa.PrivateKey // private key for signing and decryption
}
// DecodeMessage decrypts the message and verifies the sender signature .
// The sender and signer of the message is contained the message 'sender' field. If the
// Sender field is missing then the 'address' field is used as sender.
// object must hold the expected message type to decode the json message containging the sender info
func (signer *MessageSigner) DecodeMessage(rawMessage string, object interface{}) (isEncrypted bool, isSigned bool, err error) {
dmessage, isEncrypted, err := DecryptMessage(rawMessage, signer.privateKey)
isSigned, err = VerifySenderJWSSignature(dmessage, object, signer.GetPublicKey)
return isEncrypted, isSigned, err
}
// SignMessages returns whether messages MUST be signed on sending or receiving
func (signer *MessageSigner) SignMessages() bool {
return signer.signMessages
}
// VerifySignedMessage parses and verifies the message signature
// as per standard, the sender and signer of the message is in the message 'Sender' field. If the
// Sender field is missing then the 'address' field contains the publisher.
// or 'address' field
func (signer *MessageSigner) VerifySignedMessage(rawMessage string, object interface{}) (isSigned bool, err error) {
isSigned, err = VerifySenderJWSSignature(rawMessage, object, signer.GetPublicKey)
return isSigned, err
}
// PublishObject encapsulates the message object in a payload, signs the message, and sends it.
// If an encryption key is provided then the signed message will be encrypted.
// The object to publish will be marshalled to JSON and signed by this publisher
func (signer *MessageSigner) PublishObject(address string, retained bool, object interface{}, encryptionKey *ecdsa.PublicKey) error {
// payload, err := json.Marshal(object)
payload, err := json.MarshalIndent(object, " ", " ")
if err != nil || object == nil {
errText := fmt.Sprintf("Publisher.publishMessage: Error marshalling message for address %s: %s", address, err)
return errors.New(errText)
}
if encryptionKey != nil {
err = signer.PublishEncrypted(address, retained, string(payload), encryptionKey)
} else {
err = signer.PublishSigned(address, retained, string(payload))
}
return err
}
// SetSignMessages enables or disables message signing. Intended for testing.
func (signer *MessageSigner) SetSignMessages(sign bool) {
signer.signMessages = sign
}
// Subscribe to messages on the given address
func (signer *MessageSigner) Subscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Subscribe(address, handler)
}
// Unsubscribe to messages on the given address
func (signer *MessageSigner) Unsubscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Unsubscribe(address, handler)
}
// PublishEncrypted sign and encrypts the payload and publish the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishEncrypted(
address string, retained bool, payload string, publicKey *ecdsa.PublicKey) error {
var err error
message := payload
// first sign, then encrypt as per RFC
if signer.signMessages {
message, _ = CreateJWSSignature(string(payload), signer.privateKey)
}
emessage, err := EncryptMessage(message, publicKey)
err = signer.messenger.Publish(address, retained, emessage)
return err
}
// PublishSigned sign the payload and publish the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishSigned(
address string, retained bool, payload string) error {
var err error
// default is unsigned
message := payload
if signer.signMessages {
message, err = CreateJWSSignature(string(payload), signer.privateKey)
if err != nil {
logrus.Errorf("Publisher.publishMessage: Error signing message for address %s: %s", address, err)
}
}
err = signer.messenger.Publish(address, retained, message)
return err
}
// NewMessageSigner creates a new instance for signing and verifying published messages
// If getPublicKey is not provided, verification of signature is skipped
func NewMessageSigner(messenger IMessenger, signingKey *ecdsa.PrivateKey,
getPublicKey func(address string) *ecdsa.PublicKey,
) *MessageSigner {
signer := &MessageSigner{
GetPublicKey: getPublicKey,
messenger: messenger,
signMessages: true,
privateKey: signingKey, // private key for signing
}
return signer
}
/*
* Helper Functions for signing and verification
*/
// CreateEcdsaSignature creates a ECDSA256 signature from the payload using the provided private key
// This returns a base64url encoded signature
func CreateEcdsaSignature(payload []byte, privateKey *ecdsa.PrivateKey) string {
if privateKey == nil {
return ""
}
hashed := sha256.Sum256(payload)
r, s, err := ecdsa.Sign(rand.Reader, privateKey, hashed[:])
if err != nil {
return ""
}
sig, err := asn1.Marshal(ECDSASignature{r, s})
return base64.URLEncoding.EncodeToString(sig)
}
// SignIdentity updates the base64URL encoded ECDSA256 signature of the public identity
func SignIdentity(publicIdent *types.PublisherIdentityMessage, privKey *ecdsa.PrivateKey) {
identCopy := *publicIdent
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
sigStr := CreateEcdsaSignature(payload, privKey)
publicIdent.IdentitySignature = sigStr
}
// CreateJWSSignature signs the payload using JSE ES256 and return the JSE compact serialized message
func CreateJWSSignature(payload string, privateKey *ecdsa.PrivateKey) (string, error) {
joseSigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: privateKey}, nil)
if err != nil {
return "", err
}
signedObject, err := joseSigner.Sign([]byte(payload))
if err != nil {
return "", err
}
// serialized := signedObject.FullSerialize()
serialized, err := signedObject.CompactSerialize()
return serialized, err
}
// DecryptMessage deserializes and decrypts the message using JWE
// This returns the decrypted message, or the input message if the message was not encrypted
func DecryptMessage(serialized string, privateKey *ecdsa.PrivateKey) (message string, isEncrypted bool, err error) {
message = serialized
decrypter, err := jose.ParseEncrypted(serialized)
if err == nil {
dmessage, err := decrypter.Decrypt(privateKey)
message = string(dmessage)
return message, true, err
}
return message, false, err
}
// EncryptMessage encrypts and serializes the message using JWE
func | (message string, publicKey *ecdsa.PublicKey) (serialized string, err error) {
var jwe *jose.JSONWebEncryption
recpnt := jose.Recipient{Algorithm: jose.ECDH_ES, Key: publicKey}
encrypter, err := jose.NewEncrypter(jose.A128CBC_HS256, recpnt, nil)
if encrypter != nil {
jwe, err = encrypter.Encrypt([]byte(message))
}
if err != nil {
return message, err
}
serialized, _ = jwe.CompactSerialize()
return serialized, err
}
// VerifyIdentitySignature verifies a base64URL encoded ECDSA256 signature in the identity
// against the identity itself using the sender's public key.
func VerifyIdentitySignature(ident *types.PublisherIdentityMessage, pubKey *ecdsa.PublicKey) error {
// the signing took place with the signature field empty
identCopy := *ident
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
err := VerifyEcdsaSignature(payload, ident.IdentitySignature, pubKey)
// signingKey := jose.SigningKey{Algorithm: jose.ES256, Key: privKey}
// joseSigner, _ := jose.NewSigner(signingKey, nil)
// jwsObject, _ := joseSigner.Verify(payload)
// sig := jwsObject.Signatures[0].Signature
// sigStr := base64.URLEncoding.EncodeToString(sig)
// return sigStr
return err
}
// VerifyEcdsaSignature the payload using the base64url encoded signature and public key
// payload is any raw data
// signatureB64urlEncoded is the ecdsa 256 URL encoded signature
// Intended for signing an object like the publisher identity. Use VerifyJWSMessage for
// verifying JWS signed messages.
func VerifyEcdsaSignature(payload []byte, signatureB64urlEncoded string, publicKey *ecdsa.PublicKey) error {
var rs ECDSASignature
if publicKey == nil {
return errors.New("VerifyEcdsaSignature: publicKey is nil")
}
signature, err := base64.URLEncoding.DecodeString(signatureB64urlEncoded)
if err != nil {
return errors.New("VerifyEcdsaSignature: Invalid signature")
}
if _, err = asn1.Unmarshal(signature, &rs); err != nil {
return errors.New("VerifyEcdsaSignature: Payload is not ASN")
}
hashed := sha256.Sum256(payload)
verified := ecdsa.Verify(publicKey, hashed[:], rs.R, rs.S)
if !verified {
return errors.New("VerifyEcdsaSignature: Signature does not match payload")
}
return nil
}
// VerifyJWSMessage verifies a signed message and returns its payload
// The message is a JWS encoded string. The public key of the sender is
// needed to verify the message.
// Intended for testing, as the application uses VerifySenderJWSSignature instead.
func VerifyJWSMessage(message string, publicKey *ecdsa.PublicKey) (payload string, err error) {
if publicKey == nil {
err := errors.New("VerifyJWSMessage: public key is nil")
return "", err
}
jwsSignature, err := jose.ParseSigned(message)
if err != nil {
return "", err
}
payloadB, err := jwsSignature.Verify(publicKey)
return string(payloadB), err
}
// VerifySenderJWSSignature verifies if a message is JWS signed. If signed then the signature is verified
// using the 'Sender' or 'Address' attributes to determine the public key to verify with.
// To verify correctly, the sender has to be a known publisher and verified with the DSS.
// object MUST be a pointer to the type otherwise unmarshal fails.
//
// getPublicKey is a lookup function for providing the public key from the given sender address.
// it should only provide a public key if the publisher is known and verified by the DSS, or
// if this zone does not use a DSS (publisher are protected through message bus ACLs)
// If not provided then signature verification will succeed.
//
// The rawMessage is json unmarshalled into the given object.
//
// This returns a flag if the message was signed and if so, an error if the verification failed
func VerifySenderJWSSignature(rawMessage string, object interface{}, getPublicKey func(address string) *ecdsa.PublicKey) (isSigned bool, err error) {
jwsSignature, err := jose.ParseSigned(rawMessage)
if err != nil {
// message is (probably) not signed, try to unmarshal it directly
err = json.Unmarshal([]byte(rawMessage), object)
return false, err
}
payload := jwsSignature.UnsafePayloadWithoutVerification()
err = json.Unmarshal([]byte(payload), object)
if err != nil {
// message doesn't have a json payload
errTxt := fmt.Sprintf("VerifySenderSignature: Signature okay but message unmarshal failed: %s", err)
return true, errors.New(errTxt)
}
// determine who the sender is
reflObject := reflect.ValueOf(object).Elem()
reflSender := reflObject.FieldByName("Sender")
if !reflSender.IsValid() {
reflSender = reflObject.FieldByName("Address")
if !reflSender.IsValid() {
err = errors.New("VerifySenderJWSSignature: object doesn't have a Sender or Address field")
return true, err
}
}
sender := reflSender.String()
if sender == "" {
err := errors.New("VerifySenderJWSSignature: Missing sender or address information in message")
return true, err
}
// verify the message signature using the sender's public key
if getPublicKey == nil {
return true, nil
}
publicKey := getPublicKey(sender)
if publicKey == nil {
err := errors.New("VerifySenderJWSSignature: No public key available for sender " + sender)
return true, err
}
_, err = jwsSignature.Verify(publicKey)
if err != nil {
msg := fmt.Sprintf("VerifySenderJWSSignature: message signature from %s fails to verify with its public key", sender)
err := errors.New(msg)
return true, err
}
return true, err
}
| EncryptMessage | identifier_name |
MessageSigner.go | // Package messaging for signing and encryption of messages
package messaging
import (
"crypto/ecdsa"
"crypto/rand"
"crypto/sha256"
"encoding/asn1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"reflect"
"github.com/iotdomain/iotdomain-go/types"
"github.com/sirupsen/logrus"
"gopkg.in/square/go-jose.v2"
)
// MessageSigner for signing and verifying of signed and encrypted messages
type MessageSigner struct {
// GetPublicKey when available is used in mess to verify signature
GetPublicKey func(address string) *ecdsa.PublicKey // must be a variable
messenger IMessenger
signMessages bool // flag, sign outgoing messages. Default is true. Disable for testing
privateKey *ecdsa.PrivateKey // private key for signing and decryption
}
// DecodeMessage decrypts the message and verifies the sender signature .
// The sender and signer of the message is contained the message 'sender' field. If the
// Sender field is missing then the 'address' field is used as sender.
// object must hold the expected message type to decode the json message containging the sender info
func (signer *MessageSigner) DecodeMessage(rawMessage string, object interface{}) (isEncrypted bool, isSigned bool, err error) {
dmessage, isEncrypted, err := DecryptMessage(rawMessage, signer.privateKey)
isSigned, err = VerifySenderJWSSignature(dmessage, object, signer.GetPublicKey)
return isEncrypted, isSigned, err
}
// SignMessages returns whether messages MUST be signed on sending or receiving
func (signer *MessageSigner) SignMessages() bool {
return signer.signMessages
}
// VerifySignedMessage parses and verifies the message signature
// as per standard, the sender and signer of the message is in the message 'Sender' field. If the
// Sender field is missing then the 'address' field contains the publisher.
// or 'address' field
func (signer *MessageSigner) VerifySignedMessage(rawMessage string, object interface{}) (isSigned bool, err error) {
isSigned, err = VerifySenderJWSSignature(rawMessage, object, signer.GetPublicKey)
return isSigned, err
}
// PublishObject encapsulates the message object in a payload, signs the message, and sends it.
// If an encryption key is provided then the signed message will be encrypted.
// The object to publish will be marshalled to JSON and signed by this publisher
func (signer *MessageSigner) PublishObject(address string, retained bool, object interface{}, encryptionKey *ecdsa.PublicKey) error {
// payload, err := json.Marshal(object)
payload, err := json.MarshalIndent(object, " ", " ")
if err != nil || object == nil {
errText := fmt.Sprintf("Publisher.publishMessage: Error marshalling message for address %s: %s", address, err)
return errors.New(errText)
}
if encryptionKey != nil {
err = signer.PublishEncrypted(address, retained, string(payload), encryptionKey)
} else {
err = signer.PublishSigned(address, retained, string(payload))
}
return err
}
// SetSignMessages enables or disables message signing. Intended for testing.
func (signer *MessageSigner) SetSignMessages(sign bool) {
signer.signMessages = sign
}
// Subscribe to messages on the given address
func (signer *MessageSigner) Subscribe(
address string,
handler func(address string, message string) error) |
// Unsubscribe to messages on the given address
func (signer *MessageSigner) Unsubscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Unsubscribe(address, handler)
}
// PublishEncrypted sign and encrypts the payload and publish the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishEncrypted(
address string, retained bool, payload string, publicKey *ecdsa.PublicKey) error {
var err error
message := payload
// first sign, then encrypt as per RFC
if signer.signMessages {
message, _ = CreateJWSSignature(string(payload), signer.privateKey)
}
emessage, err := EncryptMessage(message, publicKey)
err = signer.messenger.Publish(address, retained, emessage)
return err
}
// PublishSigned sign the payload and publish the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishSigned(
address string, retained bool, payload string) error {
var err error
// default is unsigned
message := payload
if signer.signMessages {
message, err = CreateJWSSignature(string(payload), signer.privateKey)
if err != nil {
logrus.Errorf("Publisher.publishMessage: Error signing message for address %s: %s", address, err)
}
}
err = signer.messenger.Publish(address, retained, message)
return err
}
// NewMessageSigner creates a new instance for signing and verifying published messages
// If getPublicKey is not provided, verification of signature is skipped
func NewMessageSigner(messenger IMessenger, signingKey *ecdsa.PrivateKey,
getPublicKey func(address string) *ecdsa.PublicKey,
) *MessageSigner {
signer := &MessageSigner{
GetPublicKey: getPublicKey,
messenger: messenger,
signMessages: true,
privateKey: signingKey, // private key for signing
}
return signer
}
/*
* Helper Functions for signing and verification
*/
// CreateEcdsaSignature creates a ECDSA256 signature from the payload using the provided private key
// This returns a base64url encoded signature
func CreateEcdsaSignature(payload []byte, privateKey *ecdsa.PrivateKey) string {
if privateKey == nil {
return ""
}
hashed := sha256.Sum256(payload)
r, s, err := ecdsa.Sign(rand.Reader, privateKey, hashed[:])
if err != nil {
return ""
}
sig, err := asn1.Marshal(ECDSASignature{r, s})
return base64.URLEncoding.EncodeToString(sig)
}
// SignIdentity updates the base64URL encoded ECDSA256 signature of the public identity
func SignIdentity(publicIdent *types.PublisherIdentityMessage, privKey *ecdsa.PrivateKey) {
identCopy := *publicIdent
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
sigStr := CreateEcdsaSignature(payload, privKey)
publicIdent.IdentitySignature = sigStr
}
// CreateJWSSignature signs the payload using JSE ES256 and return the JSE compact serialized message
func CreateJWSSignature(payload string, privateKey *ecdsa.PrivateKey) (string, error) {
joseSigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: privateKey}, nil)
if err != nil {
return "", err
}
signedObject, err := joseSigner.Sign([]byte(payload))
if err != nil {
return "", err
}
// serialized := signedObject.FullSerialize()
serialized, err := signedObject.CompactSerialize()
return serialized, err
}
// DecryptMessage deserializes and decrypts the message using JWE
// This returns the decrypted message, or the input message if the message was not encrypted
func DecryptMessage(serialized string, privateKey *ecdsa.PrivateKey) (message string, isEncrypted bool, err error) {
message = serialized
decrypter, err := jose.ParseEncrypted(serialized)
if err == nil {
dmessage, err := decrypter.Decrypt(privateKey)
message = string(dmessage)
return message, true, err
}
return message, false, err
}
// EncryptMessage encrypts and serializes the message using JWE
func EncryptMessage(message string, publicKey *ecdsa.PublicKey) (serialized string, err error) {
var jwe *jose.JSONWebEncryption
recpnt := jose.Recipient{Algorithm: jose.ECDH_ES, Key: publicKey}
encrypter, err := jose.NewEncrypter(jose.A128CBC_HS256, recpnt, nil)
if encrypter != nil {
jwe, err = encrypter.Encrypt([]byte(message))
}
if err != nil {
return message, err
}
serialized, _ = jwe.CompactSerialize()
return serialized, err
}
// VerifyIdentitySignature verifies a base64URL encoded ECDSA256 signature in the identity
// against the identity itself using the sender's public key.
func VerifyIdentitySignature(ident *types.PublisherIdentityMessage, pubKey *ecdsa.PublicKey) error {
// the signing took place with the signature field empty
identCopy := *ident
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
err := VerifyEcdsaSignature(payload, ident.IdentitySignature, pubKey)
// signingKey := jose.SigningKey{Algorithm: jose.ES256, Key: privKey}
// joseSigner, _ := jose.NewSigner(signingKey, nil)
// jwsObject, _ := joseSigner.Verify(payload)
// sig := jwsObject.Signatures[0].Signature
// sigStr := base64.URLEncoding.EncodeToString(sig)
// return sigStr
return err
}
// VerifyEcdsaSignature the payload using the base64url encoded signature and public key
// payload is any raw data
// signatureB64urlEncoded is the ecdsa 256 URL encoded signature
// Intended for signing an object like the publisher identity. Use VerifyJWSMessage for
// verifying JWS signed messages.
func VerifyEcdsaSignature(payload []byte, signatureB64urlEncoded string, publicKey *ecdsa.PublicKey) error {
var rs ECDSASignature
if publicKey == nil {
return errors.New("VerifyEcdsaSignature: publicKey is nil")
}
signature, err := base64.URLEncoding.DecodeString(signatureB64urlEncoded)
if err != nil {
return errors.New("VerifyEcdsaSignature: Invalid signature")
}
if _, err = asn1.Unmarshal(signature, &rs); err != nil {
return errors.New("VerifyEcdsaSignature: Payload is not ASN")
}
hashed := sha256.Sum256(payload)
verified := ecdsa.Verify(publicKey, hashed[:], rs.R, rs.S)
if !verified {
return errors.New("VerifyEcdsaSignature: Signature does not match payload")
}
return nil
}
// VerifyJWSMessage verifies a signed message and returns its payload
// The message is a JWS encoded string. The public key of the sender is
// needed to verify the message.
// Intended for testing, as the application uses VerifySenderJWSSignature instead.
func VerifyJWSMessage(message string, publicKey *ecdsa.PublicKey) (payload string, err error) {
if publicKey == nil {
err := errors.New("VerifyJWSMessage: public key is nil")
return "", err
}
jwsSignature, err := jose.ParseSigned(message)
if err != nil {
return "", err
}
payloadB, err := jwsSignature.Verify(publicKey)
return string(payloadB), err
}
// VerifySenderJWSSignature verifies if a message is JWS signed. If signed then the signature is verified
// using the 'Sender' or 'Address' attributes to determine the public key to verify with.
// To verify correctly, the sender has to be a known publisher and verified with the DSS.
// object MUST be a pointer to the type otherwise unmarshal fails.
//
// getPublicKey is a lookup function for providing the public key from the given sender address.
// it should only provide a public key if the publisher is known and verified by the DSS, or
// if this zone does not use a DSS (publisher are protected through message bus ACLs)
// If not provided then signature verification will succeed.
//
// The rawMessage is json unmarshalled into the given object.
//
// This returns a flag if the message was signed and if so, an error if the verification failed
func VerifySenderJWSSignature(rawMessage string, object interface{}, getPublicKey func(address string) *ecdsa.PublicKey) (isSigned bool, err error) {
jwsSignature, err := jose.ParseSigned(rawMessage)
if err != nil {
// message is (probably) not signed, try to unmarshal it directly
err = json.Unmarshal([]byte(rawMessage), object)
return false, err
}
payload := jwsSignature.UnsafePayloadWithoutVerification()
err = json.Unmarshal([]byte(payload), object)
if err != nil {
// message doesn't have a json payload
errTxt := fmt.Sprintf("VerifySenderSignature: Signature okay but message unmarshal failed: %s", err)
return true, errors.New(errTxt)
}
// determine who the sender is
reflObject := reflect.ValueOf(object).Elem()
reflSender := reflObject.FieldByName("Sender")
if !reflSender.IsValid() {
reflSender = reflObject.FieldByName("Address")
if !reflSender.IsValid() {
err = errors.New("VerifySenderJWSSignature: object doesn't have a Sender or Address field")
return true, err
}
}
sender := reflSender.String()
if sender == "" {
err := errors.New("VerifySenderJWSSignature: Missing sender or address information in message")
return true, err
}
// verify the message signature using the sender's public key
if getPublicKey == nil {
return true, nil
}
publicKey := getPublicKey(sender)
if publicKey == nil {
err := errors.New("VerifySenderJWSSignature: No public key available for sender " + sender)
return true, err
}
_, err = jwsSignature.Verify(publicKey)
if err != nil {
msg := fmt.Sprintf("VerifySenderJWSSignature: message signature from %s fails to verify with its public key", sender)
err := errors.New(msg)
return true, err
}
return true, err
}
| {
signer.messenger.Subscribe(address, handler)
} | identifier_body |
main.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use anyhow;
use fuchsia_async;
use futures::future::join_all;
use io::BufWriter;
use json5format::{Json5Format, ParsedDocument};
use std::{
ffi::OsString,
io::{self, Read, Write},
path::{Path, PathBuf},
process::{Command, Stdio},
};
use structopt::StructOpt;
mod reader;
mod traverser;
/// Spawns a `jq` process with the specified filter and pipes `json_string` into its stdin. Returns
/// its jq output or an error if it produces an error. If `jq_path` is `None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn run_jq5_on_file(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file'{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() {
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"## | let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith",
"first": "John",
"middle": "Jacob"
},
"children": [
"Buffy",
"Biff",
"Balto"
],
// Consider adding a note field to the `other` contact option
"contact_options": [
{
"home": {
"email": "jj@notreallygmail.com", // This was the original user id.
// Now user id's are hash values.
"phone": "212-555-4321"
},
"other": {
"email": "volunteering@serviceprojectsrus.org"
},
"work": {
"phone": "212-555-1234",
"email": "john.j.smith@worksforme.gov"
}
}
],
"address": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
/* Update schema to support multiple addresses:
"work": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
}
*/
}
}
"##,
);
file.write_all(json5_string.as_bytes()).unwrap();
let (parsed_json5, json_string) = reader::read_json5_fromfile(&tmp_path).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&".".to_string(), parsed_json5, json_string, &jq_path).await.unwrap(),
run_jq5_on_file(&".".to_string(), &tmp_path, &jq_path).await.unwrap()
)
}
} | );
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() { | random_line_split |
main.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use anyhow;
use fuchsia_async;
use futures::future::join_all;
use io::BufWriter;
use json5format::{Json5Format, ParsedDocument};
use std::{
ffi::OsString,
io::{self, Read, Write},
path::{Path, PathBuf},
process::{Command, Stdio},
};
use structopt::StructOpt;
mod reader;
mod traverser;
/// Spawns a `jq` process with the specified filter and pipes `json_string` into its stdin. Returns
/// its jq output or an error if it produces an error. If `jq_path` is `None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn run_jq5_on_file(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file'{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() |
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() {
let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith",
"first": "John",
"middle": "Jacob"
},
"children": [
"Buffy",
"Biff",
"Balto"
],
// Consider adding a note field to the `other` contact option
"contact_options": [
{
"home": {
"email": "jj@notreallygmail.com", // This was the original user id.
// Now user id's are hash values.
"phone": "212-555-4321"
},
"other": {
"email": "volunteering@serviceprojectsrus.org"
},
"work": {
"phone": "212-555-1234",
"email": "john.j.smith@worksforme.gov"
}
}
],
"address": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
/* Update schema to support multiple addresses:
"work": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
}
*/
}
}
"##,
);
file.write_all(json5_string.as_bytes()).unwrap();
let (parsed_json5, json_string) = reader::read_json5_fromfile(&tmp_path).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&".".to_string(), parsed_json5, json_string, &jq_path).await.unwrap(),
run_jq5_on_file(&".".to_string(), &tmp_path, &jq_path).await.unwrap()
)
}
}
| {
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"##
);
} | identifier_body |
main.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use anyhow;
use fuchsia_async;
use futures::future::join_all;
use io::BufWriter;
use json5format::{Json5Format, ParsedDocument};
use std::{
ffi::OsString,
io::{self, Read, Write},
path::{Path, PathBuf},
process::{Command, Stdio},
};
use structopt::StructOpt;
mod reader;
mod traverser;
/// Spawns a `jq` process with the specified filter and pipes `json_string` into its stdin. Returns
/// its jq output or an error if it produces an error. If `jq_path` is `None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn | (
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file'{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() {
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"##
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() {
let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith",
"first": "John",
"middle": "Jacob"
},
"children": [
"Buffy",
"Biff",
"Balto"
],
// Consider adding a note field to the `other` contact option
"contact_options": [
{
"home": {
"email": "jj@notreallygmail.com", // This was the original user id.
// Now user id's are hash values.
"phone": "212-555-4321"
},
"other": {
"email": "volunteering@serviceprojectsrus.org"
},
"work": {
"phone": "212-555-1234",
"email": "john.j.smith@worksforme.gov"
}
}
],
"address": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
/* Update schema to support multiple addresses:
"work": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
}
*/
}
}
"##,
);
file.write_all(json5_string.as_bytes()).unwrap();
let (parsed_json5, json_string) = reader::read_json5_fromfile(&tmp_path).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&".".to_string(), parsed_json5, json_string, &jq_path).await.unwrap(),
run_jq5_on_file(&".".to_string(), &tmp_path, &jq_path).await.unwrap()
)
}
}
| run_jq5_on_file | identifier_name |
main.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use anyhow;
use fuchsia_async;
use futures::future::join_all;
use io::BufWriter;
use json5format::{Json5Format, ParsedDocument};
use std::{
ffi::OsString,
io::{self, Read, Write},
path::{Path, PathBuf},
process::{Command, Stdio},
};
use structopt::StructOpt;
mod reader;
mod traverser;
/// Spawns a `jq` process with the specified filter and pipes `json_string` into its stdin. Returns
/// its jq output or an error if it produces an error. If `jq_path` is `None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() | else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn run_jq5_on_file(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file'{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() {
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"##
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() {
let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith",
"first": "John",
"middle": "Jacob"
},
"children": [
"Buffy",
"Biff",
"Balto"
],
// Consider adding a note field to the `other` contact option
"contact_options": [
{
"home": {
"email": "jj@notreallygmail.com", // This was the original user id.
// Now user id's are hash values.
"phone": "212-555-4321"
},
"other": {
"email": "volunteering@serviceprojectsrus.org"
},
"work": {
"phone": "212-555-1234",
"email": "john.j.smith@worksforme.gov"
}
}
],
"address": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
/* Update schema to support multiple addresses:
"work": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
}
*/
}
}
"##,
);
file.write_all(json5_string.as_bytes()).unwrap();
let (parsed_json5, json_string) = reader::read_json5_fromfile(&tmp_path).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&".".to_string(), parsed_json5, json_string, &jq_path).await.unwrap(),
run_jq5_on_file(&".".to_string(), &tmp_path, &jq_path).await.unwrap()
)
}
}
| {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} | conditional_block |
DAQClient.py | #!/usr/bin/env python
import os, socket, sys, threading
from CnCLogger import CnCLogger
from DAQRPC import RPCClient
from RunSet import RunSet
from UniqueID import UniqueID
from exc_string import exc_string, set_exc_string_encoding
set_exc_string_encoding("ascii")
# Find install location via $PDAQ_HOME, otherwise use locate_pdaq.py
if os.environ.has_key("PDAQ_HOME"):
metaDir = os.environ["PDAQ_HOME"]
else:
from locate_pdaq import find_pdaq_trunk
metaDir = find_pdaq_trunk()
# add meta-project python dir to Python library search path
sys.path.append(os.path.join(metaDir, 'src', 'main', 'python'))
from SVNVersionInfo import get_version_info
SVN_ID = "$Id: CnCServer.py 4782 2009-12-04 15:50:49Z dglo $"
class BeanFieldNotFoundException(Exception): pass
class MBeanClient(object):
def __init__(self, compName, host, port):
self.__compName = compName
self.__client = RPCClient(host, port)
self.__beanFields = {}
self.__beanList = self.__client.mbean.listMBeans()
for bean in self.__beanList:
self.__beanFields[bean] = self.__client.mbean.listGetters(bean)
@classmethod
def __unFixValue(cls,obj):
""" Look for numbers masquerading as strings. If an obj is a
string and successfully converts to a number, return that
convertion. If obj is a dict or list, recuse into it
converting all such masquerading strings. All other types are
unaltered. This pairs with the similarly named fix* methods in
icecube.daq.juggler.mbean.XMLRPCServer """
if type(obj) is dict:
for k in obj.keys():
obj[k] = cls.__unFixValue(obj[k])
elif type(obj) is list:
for i in xrange(0, len(obj)):
obj[i] = cls.__unFixValue(obj[i])
elif type(obj) is str:
try:
return int(obj)
except ValueError:
pass
return obj
def checkBeanField(self, bean, fld):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
if fld not in self.__beanFields[bean]:
msg = "Bean %s field %s not in list of bean fields for %s (%s)" % \
(bean, fld, self.__compName, str(self.__beanFields[bean]))
raise BeanFieldNotFoundException(msg)
def get(self, bean, fld):
self.checkBeanField(bean, fld)
return self.__unFixValue(self.__client.mbean.get(bean, fld))
def getAttributes(self, bean, fldList):
attrs = self.__client.mbean.getAttributes(bean, fldList)
if type(attrs) == dict and len(attrs) > 0:
for k in attrs.keys():
attrs[k] = self.__unFixValue(attrs[k])
return attrs
def getBeanNames(self):
return self.__beanList
def getBeanFields(self, bean):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
return self.__beanFields[bean]
class ComponentName(object):
"DAQ component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = ''
else:
csStr += ']'
return csStr
def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also reading requests from the eventBuilder
if self.isHub():
return True
for conn in self.__connectors:
if conn.isInput():
return False
return True
def listConnectorStates(self):
return self.__client.xmlrpc.listConnectorStates()
def logTo(self, logIP, logPort, liveIP, livePort):
"Send log messages to the specified host and port"
self.__log.openLog(logIP, logPort, liveIP, livePort)
if logIP is None:
logIP = ''
if logPort is None:
logPort = 0
if liveIP is None:
liveIP = ''
if livePort is None:
livePort = 0
self.__client.xmlrpc.logTo(logIP, logPort, liveIP, livePort)
infoStr = self.__client.xmlrpc.getVersionInfo()
self.__log.debug(("Version info: %(filename)s %(revision)s" +
" %(date)s %(time)s %(author)s %(release)s" +
" %(repo_rev)s") % get_version_info(infoStr))
def map(self):
|
def mbeanPort(self):
return self.__mbeanPort
def monitor(self):
"Return the monitoring value"
return self.state()
def order(self):
return self.__cmdOrder
def port(self):
return self.__port
def prepareSubrun(self, subrunNum):
"Start marking events as bogus in preparation for subrun"
try:
return self.__client.xmlrpc.prepareSubrun(subrunNum)
except:
self.__log.error(exc_string())
return None
def reset(self):
"Reset component back to the idle state"
self.__log.closeLog()
return self.__client.xmlrpc.reset()
def resetLogging(self):
"Reset component back to the idle state"
self.__log.resetLog()
return self.__client.xmlrpc.resetLogging()
def setOrder(self, orderNum):
self.__cmdOrder = orderNum
def startRun(self, runNum):
"Start component processing DAQ data"
try:
return self.__client.xmlrpc.startRun(runNum)
except:
self.__log.error(exc_string())
return None
def startSubrun(self, data):
"Send subrun data to stringHubs"
try:
return self.__client.xmlrpc.startSubrun(data)
except:
self.__log.error(exc_string())
return None
def state(self):
"Get current state"
try:
state = self.__client.xmlrpc.getState()
except socket.error:
state = None
except:
self.__log.error(exc_string())
state = None
if not state:
self.__deadCount += 1
if self.__deadCount < 3:
state = DAQClient.STATE_MISSING
else:
state = DAQClient.STATE_DEAD
return state
def stopRun(self):
"Stop component processing DAQ data"
try:
return self.__client.xmlrpc.stopRun()
except:
self.__log.error(exc_string())
return None
def terminate(self):
"Terminate component"
state = self.state()
if state != "idle" and state != "ready" and \
state != self.STATE_MISSING and state != self.STATE_DEAD:
raise DAQClientException("%s state is %s" % (self, state))
self.__log.closeFinal()
try:
self.__client.xmlrpc.terminate()
except:
# ignore termination exceptions
pass
| return { "id" : self.__id,
"compName" : self.name(),
"compNum" : self.num(),
"host" : self.__host,
"rpcPort" : self.__port,
"mbeanPort" : self.__mbeanPort } | identifier_body |
DAQClient.py | #!/usr/bin/env python
import os, socket, sys, threading
from CnCLogger import CnCLogger
from DAQRPC import RPCClient
from RunSet import RunSet
from UniqueID import UniqueID
from exc_string import exc_string, set_exc_string_encoding
set_exc_string_encoding("ascii")
# Find install location via $PDAQ_HOME, otherwise use locate_pdaq.py
if os.environ.has_key("PDAQ_HOME"):
metaDir = os.environ["PDAQ_HOME"]
else:
from locate_pdaq import find_pdaq_trunk
metaDir = find_pdaq_trunk()
# add meta-project python dir to Python library search path
sys.path.append(os.path.join(metaDir, 'src', 'main', 'python'))
from SVNVersionInfo import get_version_info
SVN_ID = "$Id: CnCServer.py 4782 2009-12-04 15:50:49Z dglo $"
class BeanFieldNotFoundException(Exception): pass
class MBeanClient(object):
def __init__(self, compName, host, port):
self.__compName = compName
self.__client = RPCClient(host, port)
self.__beanFields = {}
self.__beanList = self.__client.mbean.listMBeans()
for bean in self.__beanList:
self.__beanFields[bean] = self.__client.mbean.listGetters(bean)
@classmethod
def __unFixValue(cls,obj):
""" Look for numbers masquerading as strings. If an obj is a
string and successfully converts to a number, return that
convertion. If obj is a dict or list, recuse into it
converting all such masquerading strings. All other types are
unaltered. This pairs with the similarly named fix* methods in
icecube.daq.juggler.mbean.XMLRPCServer """
if type(obj) is dict:
for k in obj.keys():
obj[k] = cls.__unFixValue(obj[k])
elif type(obj) is list:
for i in xrange(0, len(obj)):
obj[i] = cls.__unFixValue(obj[i])
elif type(obj) is str:
try:
return int(obj)
except ValueError:
pass
return obj
def checkBeanField(self, bean, fld):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
if fld not in self.__beanFields[bean]:
msg = "Bean %s field %s not in list of bean fields for %s (%s)" % \
(bean, fld, self.__compName, str(self.__beanFields[bean]))
raise BeanFieldNotFoundException(msg)
def get(self, bean, fld):
self.checkBeanField(bean, fld)
return self.__unFixValue(self.__client.mbean.get(bean, fld))
def getAttributes(self, bean, fldList):
attrs = self.__client.mbean.getAttributes(bean, fldList)
if type(attrs) == dict and len(attrs) > 0:
for k in attrs.keys():
|
return attrs
def getBeanNames(self):
return self.__beanList
def getBeanFields(self, bean):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
return self.__beanFields[bean]
class ComponentName(object):
"DAQ component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = ''
else:
csStr += ']'
return csStr
def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also reading requests from the eventBuilder
if self.isHub():
return True
for conn in self.__connectors:
if conn.isInput():
return False
return True
def listConnectorStates(self):
return self.__client.xmlrpc.listConnectorStates()
def logTo(self, logIP, logPort, liveIP, livePort):
"Send log messages to the specified host and port"
self.__log.openLog(logIP, logPort, liveIP, livePort)
if logIP is None:
logIP = ''
if logPort is None:
logPort = 0
if liveIP is None:
liveIP = ''
if livePort is None:
livePort = 0
self.__client.xmlrpc.logTo(logIP, logPort, liveIP, livePort)
infoStr = self.__client.xmlrpc.getVersionInfo()
self.__log.debug(("Version info: %(filename)s %(revision)s" +
" %(date)s %(time)s %(author)s %(release)s" +
" %(repo_rev)s") % get_version_info(infoStr))
def map(self):
return { "id" : self.__id,
"compName" : self.name(),
"compNum" : self.num(),
"host" : self.__host,
"rpcPort" : self.__port,
"mbeanPort" : self.__mbeanPort }
def mbeanPort(self):
return self.__mbeanPort
def monitor(self):
"Return the monitoring value"
return self.state()
def order(self):
return self.__cmdOrder
def port(self):
return self.__port
def prepareSubrun(self, subrunNum):
"Start marking events as bogus in preparation for subrun"
try:
return self.__client.xmlrpc.prepareSubrun(subrunNum)
except:
self.__log.error(exc_string())
return None
def reset(self):
"Reset component back to the idle state"
self.__log.closeLog()
return self.__client.xmlrpc.reset()
def resetLogging(self):
"Reset component back to the idle state"
self.__log.resetLog()
return self.__client.xmlrpc.resetLogging()
def setOrder(self, orderNum):
self.__cmdOrder = orderNum
def startRun(self, runNum):
"Start component processing DAQ data"
try:
return self.__client.xmlrpc.startRun(runNum)
except:
self.__log.error(exc_string())
return None
def startSubrun(self, data):
"Send subrun data to stringHubs"
try:
return self.__client.xmlrpc.startSubrun(data)
except:
self.__log.error(exc_string())
return None
def state(self):
"Get current state"
try:
state = self.__client.xmlrpc.getState()
except socket.error:
state = None
except:
self.__log.error(exc_string())
state = None
if not state:
self.__deadCount += 1
if self.__deadCount < 3:
state = DAQClient.STATE_MISSING
else:
state = DAQClient.STATE_DEAD
return state
def stopRun(self):
"Stop component processing DAQ data"
try:
return self.__client.xmlrpc.stopRun()
except:
self.__log.error(exc_string())
return None
def terminate(self):
"Terminate component"
state = self.state()
if state != "idle" and state != "ready" and \
state != self.STATE_MISSING and state != self.STATE_DEAD:
raise DAQClientException("%s state is %s" % (self, state))
self.__log.closeFinal()
try:
self.__client.xmlrpc.terminate()
except:
# ignore termination exceptions
pass
| attrs[k] = self.__unFixValue(attrs[k]) | conditional_block |
DAQClient.py | #!/usr/bin/env python
import os, socket, sys, threading
from CnCLogger import CnCLogger
from DAQRPC import RPCClient
from RunSet import RunSet
from UniqueID import UniqueID
from exc_string import exc_string, set_exc_string_encoding
set_exc_string_encoding("ascii")
# Find install location via $PDAQ_HOME, otherwise use locate_pdaq.py
if os.environ.has_key("PDAQ_HOME"):
metaDir = os.environ["PDAQ_HOME"]
else:
from locate_pdaq import find_pdaq_trunk
metaDir = find_pdaq_trunk()
# add meta-project python dir to Python library search path
sys.path.append(os.path.join(metaDir, 'src', 'main', 'python'))
from SVNVersionInfo import get_version_info
SVN_ID = "$Id: CnCServer.py 4782 2009-12-04 15:50:49Z dglo $"
class BeanFieldNotFoundException(Exception): pass
class MBeanClient(object):
def __init__(self, compName, host, port):
self.__compName = compName
self.__client = RPCClient(host, port)
self.__beanFields = {}
self.__beanList = self.__client.mbean.listMBeans()
for bean in self.__beanList:
self.__beanFields[bean] = self.__client.mbean.listGetters(bean)
@classmethod
def __unFixValue(cls,obj):
""" Look for numbers masquerading as strings. If an obj is a
string and successfully converts to a number, return that
convertion. If obj is a dict or list, recuse into it
converting all such masquerading strings. All other types are
unaltered. This pairs with the similarly named fix* methods in
icecube.daq.juggler.mbean.XMLRPCServer """
if type(obj) is dict:
for k in obj.keys():
obj[k] = cls.__unFixValue(obj[k])
elif type(obj) is list:
for i in xrange(0, len(obj)):
obj[i] = cls.__unFixValue(obj[i])
elif type(obj) is str:
try:
return int(obj)
except ValueError:
pass
return obj
def checkBeanField(self, bean, fld):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
if fld not in self.__beanFields[bean]:
msg = "Bean %s field %s not in list of bean fields for %s (%s)" % \
(bean, fld, self.__compName, str(self.__beanFields[bean]))
raise BeanFieldNotFoundException(msg)
def get(self, bean, fld):
self.checkBeanField(bean, fld)
return self.__unFixValue(self.__client.mbean.get(bean, fld))
def getAttributes(self, bean, fldList):
attrs = self.__client.mbean.getAttributes(bean, fldList)
if type(attrs) == dict and len(attrs) > 0:
for k in attrs.keys():
attrs[k] = self.__unFixValue(attrs[k])
return attrs
def getBeanNames(self):
return self.__beanList
def getBeanFields(self, bean):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
return self.__beanFields[bean]
class ComponentName(object):
"DAQ component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = '' | def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also reading requests from the eventBuilder
if self.isHub():
return True
for conn in self.__connectors:
if conn.isInput():
return False
return True
def listConnectorStates(self):
return self.__client.xmlrpc.listConnectorStates()
def logTo(self, logIP, logPort, liveIP, livePort):
"Send log messages to the specified host and port"
self.__log.openLog(logIP, logPort, liveIP, livePort)
if logIP is None:
logIP = ''
if logPort is None:
logPort = 0
if liveIP is None:
liveIP = ''
if livePort is None:
livePort = 0
self.__client.xmlrpc.logTo(logIP, logPort, liveIP, livePort)
infoStr = self.__client.xmlrpc.getVersionInfo()
self.__log.debug(("Version info: %(filename)s %(revision)s" +
" %(date)s %(time)s %(author)s %(release)s" +
" %(repo_rev)s") % get_version_info(infoStr))
def map(self):
return { "id" : self.__id,
"compName" : self.name(),
"compNum" : self.num(),
"host" : self.__host,
"rpcPort" : self.__port,
"mbeanPort" : self.__mbeanPort }
def mbeanPort(self):
return self.__mbeanPort
def monitor(self):
"Return the monitoring value"
return self.state()
def order(self):
return self.__cmdOrder
def port(self):
return self.__port
def prepareSubrun(self, subrunNum):
"Start marking events as bogus in preparation for subrun"
try:
return self.__client.xmlrpc.prepareSubrun(subrunNum)
except:
self.__log.error(exc_string())
return None
def reset(self):
"Reset component back to the idle state"
self.__log.closeLog()
return self.__client.xmlrpc.reset()
def resetLogging(self):
"Reset component back to the idle state"
self.__log.resetLog()
return self.__client.xmlrpc.resetLogging()
def setOrder(self, orderNum):
self.__cmdOrder = orderNum
def startRun(self, runNum):
"Start component processing DAQ data"
try:
return self.__client.xmlrpc.startRun(runNum)
except:
self.__log.error(exc_string())
return None
def startSubrun(self, data):
"Send subrun data to stringHubs"
try:
return self.__client.xmlrpc.startSubrun(data)
except:
self.__log.error(exc_string())
return None
def state(self):
"Get current state"
try:
state = self.__client.xmlrpc.getState()
except socket.error:
state = None
except:
self.__log.error(exc_string())
state = None
if not state:
self.__deadCount += 1
if self.__deadCount < 3:
state = DAQClient.STATE_MISSING
else:
state = DAQClient.STATE_DEAD
return state
def stopRun(self):
"Stop component processing DAQ data"
try:
return self.__client.xmlrpc.stopRun()
except:
self.__log.error(exc_string())
return None
def terminate(self):
"Terminate component"
state = self.state()
if state != "idle" and state != "ready" and \
state != self.STATE_MISSING and state != self.STATE_DEAD:
raise DAQClientException("%s state is %s" % (self, state))
self.__log.closeFinal()
try:
self.__client.xmlrpc.terminate()
except:
# ignore termination exceptions
pass | else:
csStr += ']'
return csStr
| random_line_split |
DAQClient.py | #!/usr/bin/env python
import os, socket, sys, threading
from CnCLogger import CnCLogger
from DAQRPC import RPCClient
from RunSet import RunSet
from UniqueID import UniqueID
from exc_string import exc_string, set_exc_string_encoding
set_exc_string_encoding("ascii")
# Find install location via $PDAQ_HOME, otherwise use locate_pdaq.py
if os.environ.has_key("PDAQ_HOME"):
metaDir = os.environ["PDAQ_HOME"]
else:
from locate_pdaq import find_pdaq_trunk
metaDir = find_pdaq_trunk()
# add meta-project python dir to Python library search path
sys.path.append(os.path.join(metaDir, 'src', 'main', 'python'))
from SVNVersionInfo import get_version_info
SVN_ID = "$Id: CnCServer.py 4782 2009-12-04 15:50:49Z dglo $"
class BeanFieldNotFoundException(Exception): pass
class MBeanClient(object):
def __init__(self, compName, host, port):
self.__compName = compName
self.__client = RPCClient(host, port)
self.__beanFields = {}
self.__beanList = self.__client.mbean.listMBeans()
for bean in self.__beanList:
self.__beanFields[bean] = self.__client.mbean.listGetters(bean)
@classmethod
def __unFixValue(cls,obj):
""" Look for numbers masquerading as strings. If an obj is a
string and successfully converts to a number, return that
convertion. If obj is a dict or list, recuse into it
converting all such masquerading strings. All other types are
unaltered. This pairs with the similarly named fix* methods in
icecube.daq.juggler.mbean.XMLRPCServer """
if type(obj) is dict:
for k in obj.keys():
obj[k] = cls.__unFixValue(obj[k])
elif type(obj) is list:
for i in xrange(0, len(obj)):
obj[i] = cls.__unFixValue(obj[i])
elif type(obj) is str:
try:
return int(obj)
except ValueError:
pass
return obj
def checkBeanField(self, bean, fld):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
if fld not in self.__beanFields[bean]:
msg = "Bean %s field %s not in list of bean fields for %s (%s)" % \
(bean, fld, self.__compName, str(self.__beanFields[bean]))
raise BeanFieldNotFoundException(msg)
def get(self, bean, fld):
self.checkBeanField(bean, fld)
return self.__unFixValue(self.__client.mbean.get(bean, fld))
def getAttributes(self, bean, fldList):
attrs = self.__client.mbean.getAttributes(bean, fldList)
if type(attrs) == dict and len(attrs) > 0:
for k in attrs.keys():
attrs[k] = self.__unFixValue(attrs[k])
return attrs
def getBeanNames(self):
return self.__beanList
def getBeanFields(self, bean):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
return self.__beanFields[bean]
class ComponentName(object):
"DAQ component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = ''
else:
csStr += ']'
return csStr
def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also reading requests from the eventBuilder
if self.isHub():
return True
for conn in self.__connectors:
if conn.isInput():
return False
return True
def listConnectorStates(self):
return self.__client.xmlrpc.listConnectorStates()
def logTo(self, logIP, logPort, liveIP, livePort):
"Send log messages to the specified host and port"
self.__log.openLog(logIP, logPort, liveIP, livePort)
if logIP is None:
logIP = ''
if logPort is None:
logPort = 0
if liveIP is None:
liveIP = ''
if livePort is None:
livePort = 0
self.__client.xmlrpc.logTo(logIP, logPort, liveIP, livePort)
infoStr = self.__client.xmlrpc.getVersionInfo()
self.__log.debug(("Version info: %(filename)s %(revision)s" +
" %(date)s %(time)s %(author)s %(release)s" +
" %(repo_rev)s") % get_version_info(infoStr))
def map(self):
return { "id" : self.__id,
"compName" : self.name(),
"compNum" : self.num(),
"host" : self.__host,
"rpcPort" : self.__port,
"mbeanPort" : self.__mbeanPort }
def mbeanPort(self):
return self.__mbeanPort
def monitor(self):
"Return the monitoring value"
return self.state()
def order(self):
return self.__cmdOrder
def port(self):
return self.__port
def prepareSubrun(self, subrunNum):
"Start marking events as bogus in preparation for subrun"
try:
return self.__client.xmlrpc.prepareSubrun(subrunNum)
except:
self.__log.error(exc_string())
return None
def reset(self):
"Reset component back to the idle state"
self.__log.closeLog()
return self.__client.xmlrpc.reset()
def | (self):
"Reset component back to the idle state"
self.__log.resetLog()
return self.__client.xmlrpc.resetLogging()
def setOrder(self, orderNum):
self.__cmdOrder = orderNum
def startRun(self, runNum):
"Start component processing DAQ data"
try:
return self.__client.xmlrpc.startRun(runNum)
except:
self.__log.error(exc_string())
return None
def startSubrun(self, data):
"Send subrun data to stringHubs"
try:
return self.__client.xmlrpc.startSubrun(data)
except:
self.__log.error(exc_string())
return None
def state(self):
"Get current state"
try:
state = self.__client.xmlrpc.getState()
except socket.error:
state = None
except:
self.__log.error(exc_string())
state = None
if not state:
self.__deadCount += 1
if self.__deadCount < 3:
state = DAQClient.STATE_MISSING
else:
state = DAQClient.STATE_DEAD
return state
def stopRun(self):
"Stop component processing DAQ data"
try:
return self.__client.xmlrpc.stopRun()
except:
self.__log.error(exc_string())
return None
def terminate(self):
"Terminate component"
state = self.state()
if state != "idle" and state != "ready" and \
state != self.STATE_MISSING and state != self.STATE_DEAD:
raise DAQClientException("%s state is %s" % (self, state))
self.__log.closeFinal()
try:
self.__client.xmlrpc.terminate()
except:
# ignore termination exceptions
pass
| resetLogging | identifier_name |
lib.rs | // Copyright 2020-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A Rust client for the NATS.io ecosystem.
//!
//! `git clone https://github.com/nats-io/nats.rs`
//!
//! NATS.io is a simple, secure and high performance open source messaging
//! system for cloud native applications, `IoT` messaging, and microservices
//! architectures.
//!
//! For async API refer to the [`asynk`] module.
//!
//! For more information see [https://nats.io/].
//!
//! [https://nats.io/]: https://nats.io/
//!
//! ## Examples
//!
//! `> cargo run --example nats-box -- -h`
//!
//! Basic connections, and those with options. The compiler will force these to
//! be correct.
//!
//! ```no_run
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let nc = nats::connect("demo.nats.io")?;
//!
//! let nc2 = nats::Options::with_user_pass("derek", "s3cr3t!")
//! .with_name("My Rust NATS App")
//! .connect("127.0.0.1")?;
//!
//! let nc3 = nats::Options::with_credentials("path/to/my.creds").connect("connect.ngs.global")?;
//!
//! let nc4 = nats::Options::new()
//! .add_root_certificate("my-certs.pem")
//! .connect("tls://demo.nats.io:4443")?;
//! # Ok(()) }
//! ```
//!
//! ### Publish
//!
//! ```
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! nc.publish("my.subject", "Hello World!")?;
//!
//! nc.publish("my.subject", "my message")?;
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("my.subject", &reply, "Help me!")?;
//! # Ok(()) }
//! ```
//!
//! ### Subscribe
//!
//! ```no_run
//! # fn main() -> std::io::Result<()> {
//! # use std::time::Duration;
//! let nc = nats::connect("demo.nats.io")?;
//! let sub = nc.subscribe("foo")?;
//! for msg in sub.messages() {}
//!
//! // Using next.
//! if let Some(msg) = sub.next() {}
//!
//! // Other iterators.
//! for msg in sub.try_iter() {}
//! for msg in sub.timeout_iter(Duration::from_secs(10)) {}
//!
//! // Using a threaded handler.
//! let sub = nc.subscribe("bar")?.with_handler(move |msg| {
//! println!("Received {}", &msg);
//! Ok(())
//! });
//!
//! // Queue subscription.
//! let qsub = nc.queue_subscribe("foo", "my_group")?;
//! # Ok(()) }
//! ```
//!
//! ### Request/Response
//!
//! ```no_run
//! # use std::time::Duration;
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! let resp = nc.request("foo", "Help me?")?;
//!
//! // With a timeout.
//! let resp = nc.request_timeout("foo", "Help me?", Duration::from_secs(2))?;
//!
//! // With multiple responses.
//! for msg in nc.request_multi("foo", "Help")?.iter() {}
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("foo", &reply, "Help me!")?;
//! let response = rsub.iter().take(1);
//! # Ok(()) }
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(
feature = "fault_injection",
deny(
future_incompatible,
missing_copy_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused,
unused_qualifications
)
)]
#![cfg_attr(feature = "fault_injection", deny(
// over time, consider enabling the following commented-out lints:
// clippy::else_if_without_else,
// clippy::indexing_slicing,
// clippy::multiple_crate_versions,
// clippy::missing_const_for_fn,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::checked_conversions,
clippy::decimal_literal_representation,
clippy::doc_markdown,
clippy::empty_enum,
clippy::explicit_into_iter_loop,
clippy::explicit_iter_loop,
clippy::expl_impl_clone_on_copy,
clippy::fallible_impl_from,
clippy::filter_map_next,
clippy::float_arithmetic,
clippy::get_unwrap,
clippy::if_not_else,
clippy::inline_always,
clippy::invalid_upcast_comparisons,
clippy::items_after_statements,
clippy::manual_filter_map,
clippy::manual_find_map,
clippy::map_flatten,
clippy::map_unwrap_or,
clippy::match_same_arms,
clippy::maybe_infinite_iter,
clippy::mem_forget,
clippy::needless_borrow,
clippy::needless_continue,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::path_buf_push_overwrite,
clippy::print_stdout,
clippy::single_match_else,
clippy::string_add,
clippy::string_add_assign,
clippy::type_repetition_in_bounds,
clippy::unicode_not_nfc,
clippy::unimplemented,
clippy::unseparated_literal_suffix,
clippy::wildcard_dependencies,
clippy::wildcard_enum_match_arm,
))]
#![allow(
clippy::match_like_matches_macro,
clippy::await_holding_lock,
clippy::shadow_reuse,
clippy::shadow_same,
clippy::shadow_unrelated,
clippy::wildcard_enum_match_arm,
clippy::module_name_repetitions
)]
// As this is a deprecated client, we don't want warnings from new lints to make CI red.
#![allow(clippy::all)]
#![allow(warnings)]
/// Async-enabled NATS client.
pub mod asynk;
mod auth_utils;
mod client;
mod connect;
mod connector;
mod message;
mod options;
mod proto;
mod secure_wipe;
mod subscription;
/// Header constants and types.
pub mod header;
/// `JetStream` stream management and consumers.
pub mod jetstream;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod kv;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod object_store;
#[cfg(feature = "fault_injection")]
mod fault_injection;
#[cfg(feature = "fault_injection")]
use fault_injection::{inject_delay, inject_io_failure};
#[cfg(not(feature = "fault_injection"))]
fn inject_delay() {}
#[cfg(not(feature = "fault_injection"))]
fn inject_io_failure() -> io::Result<()> {
Ok(())
}
// comment out until we reach MSRV 1.54.0
// #[doc = include_str!("../docs/migration-guide-0.17.0.md")]
// #[derive(Copy, Clone)]
// pub struct Migration0170;
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "this has been renamed to `Options`.")]
pub type ConnectionOptions = Options;
#[doc(hidden)]
#[deprecated(since = "0.17.0", note = "this has been moved to `header::HeaderMap`.")]
pub type Headers = HeaderMap;
pub use header::HeaderMap;
use std::{
io::{self, Error, ErrorKind},
sync::Arc,
time::{Duration, Instant},
};
use lazy_static::lazy_static;
use regex::Regex;
pub use connector::{IntoServerList, ServerAddress};
pub use jetstream::JetStreamOptions;
pub use message::Message;
pub use options::Options;
pub use subscription::{Handler, Subscription};
/// A re-export of the `rustls` crate used in this crate,
/// for use in cases where manual client configurations
/// must be provided using `Options::tls_client_config`.
pub use rustls;
#[doc(hidden)]
pub use connect::ConnectInfo;
use client::Client;
use options::AuthStyle;
use secure_wipe::{SecureString, SecureVec};
const VERSION: &str = env!("CARGO_PKG_VERSION");
const LANG: &str = "rust";
const DEFAULT_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
static ref VERSION_RE: Regex = Regex::new(r#"\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?"#).unwrap();
}
/// Information sent by the server back to this client
/// during initial connection, and possibly again later.
#[allow(unused)]
#[derive(Debug, Default, Clone)]
pub struct ServerInfo {
/// The unique identifier of the NATS server.
pub server_id: String,
/// Generated Server Name.
pub server_name: String,
/// The host specified in the cluster parameter/options.
pub host: String,
/// The port number specified in the cluster parameter/options.
pub port: u16,
/// The version of the NATS server.
pub version: String,
/// If this is set, then the server should try to authenticate upon
/// connect.
pub auth_required: bool,
/// If this is set, then the server must authenticate using TLS.
pub tls_required: bool,
/// Maximum payload size that the server will accept.
pub max_payload: usize,
/// The protocol version in use.
pub proto: i8,
/// The server-assigned client ID. This may change during reconnection.
pub client_id: u64,
/// The version of golang the NATS server was built with.
pub go: String,
/// The nonce used for nkeys.
pub nonce: String,
/// A list of server urls that a client can connect to.
pub connect_urls: Vec<String>,
/// The client IP as known by the server.
pub client_ip: String,
/// Whether the server supports headers.
pub headers: bool,
/// Whether server goes into lame duck mode.
pub lame_duck_mode: bool,
}
impl ServerInfo {
fn parse(s: &str) -> Option<ServerInfo> {
let mut obj = json::parse(s).ok()?;
Some(ServerInfo {
server_id: obj["server_id"].take_string()?,
server_name: obj["server_name"].take_string().unwrap_or_default(),
host: obj["host"].take_string()?,
port: obj["port"].as_u16()?,
version: obj["version"].take_string()?,
auth_required: obj["auth_required"].as_bool().unwrap_or(false),
tls_required: obj["tls_required"].as_bool().unwrap_or(false),
max_payload: obj["max_payload"].as_usize()?,
proto: obj["proto"].as_i8()?,
client_id: obj["client_id"].as_u64()?,
go: obj["go"].take_string()?,
nonce: obj["nonce"].take_string().unwrap_or_default(),
connect_urls: obj["connect_urls"]
.members_mut()
.filter_map(|m| m.take_string())
.collect(),
client_ip: obj["client_ip"].take_string().unwrap_or_default(),
headers: obj["headers"].as_bool().unwrap_or(false),
lame_duck_mode: obj["ldm"].as_bool().unwrap_or(false),
})
}
}
/// A NATS connection.
#[derive(Clone, Debug)]
pub struct Connection(pub(crate) Arc<Inner>);
#[derive(Clone, Debug)]
struct Inner {
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
self.client.shutdown();
}
}
/// Connect to one or more NATS servers at the given URLs.
///
/// The [`IntoServerList`] trait allows to pass URLs in various different formats. Furthermore, if
/// you need more control of the connection's parameters use [`Options::connect()`].
///
/// **Warning:** There are asynchronous errors that can happen during operation of NATS client.
/// To handle them, add handler for [`Options::error_callback()`].
///
/// # Examples
///
/// If no scheme is provided the `nats://` scheme is assumed. The default port is `4222`.
/// ```no_run
/// let nc = nats::connect("demo.nats.io")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// It is possible to provide several URLs as a comma separated list.
/// ```no_run
/// let nc = nats::connect("demo.nats.io,tls://demo.nats.io:4443")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Alternatively, an array of strings can be passed.
/// ```no_run
/// # use nats::IntoServerList;
/// let nc = nats::connect(&["demo.nats.io", "tls://demo.nats.io:4443"])?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Instead of using strings, [`ServerAddress`]es can be used directly as well. This is handy for
/// validating user input.
/// ```no_run
/// use nats::ServerAddress;
/// use std::io;
/// use structopt::StructOpt;
///
/// #[derive(Debug, StructOpt)]
/// struct Config {
/// #[structopt(short, long = "server", default_value = "demo.nats.io")]
/// servers: Vec<ServerAddress>,
/// }
///
/// fn main() -> io::Result<()> {
/// let config = Config::from_args();
/// let nc = nats::connect(config.servers)?;
/// Ok(())
/// }
/// ```
pub fn connect<I: IntoServerList>(nats_urls: I) -> io::Result<Connection> {
Options::new().connect(nats_urls)
}
impl Connection {
/// Connects on one or more NATS servers with the given options.
///
/// For more on how to use [`IntoServerList`] trait see [`crate::connect()`].
pub(crate) fn connect_with_options<I>(urls: I, options: Options) -> io::Result<Connection>
where
I: IntoServerList,
{
let urls = urls.into_server_list()?;
let client = Client::connect(urls, options)?;
client.flush(DEFAULT_FLUSH_TIMEOUT)?;
Ok(Connection(Arc::new(Inner { client })))
}
/// Create a subscription for the given NATS connection.
///
/// # Example
/// ```
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo")?;
/// # Ok(())
/// # }
/// ```
pub fn subscribe(&self, subject: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, None)
}
/// Create a queue subscription for the given NATS connection.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.queue_subscribe("foo", "production")?;
/// # Ok(())
/// # }
/// ```
pub fn queue_subscribe(&self, subject: &str, queue: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, Some(queue))
}
/// Publish a message on the given subject.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.publish("foo", "Hello World!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<()> {
self.publish_with_reply_or_headers(subject, None, None, msg)
}
/// Publish a message on the given subject with a reply subject for
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// nc.publish_request("foo", &reply, "Help me!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish_request(
&self,
subject: &str,
reply: &str,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0
.client
.publish(subject, Some(reply), None, msg.as_ref())
}
/// Create a new globally unique inbox which can be used for replies.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// # Ok(())
/// # }
/// ```
pub fn new_inbox(&self) -> String {
format!("_INBOX.{}", nuid::next())
}
/// Publish a message on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request("foo", "Help me?")?;
/// # Ok(())
/// # }
/// ```
pub fn request(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if no
/// response is received.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request_timeout("foo", "Help me?", std::time::Duration::from_secs(2))?;
/// # Ok(())
/// # }
/// ```
pub fn request_timeout(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
timeout: Duration,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, Some(timeout), msg)
}
/// Publish a message with headers on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
headers: &HeaderMap,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, Some(headers), None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if it was set to `Some` if no
/// response is received. It also allows passing headers.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn | (
&self,
subject: &str,
maybe_headers: Option<&HeaderMap>,
maybe_timeout: Option<Duration>,
msg: impl AsRef<[u8]>,
) -> io::Result<Message> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), maybe_headers, msg)?;
// Wait for the response
let result = if let Some(timeout) = maybe_timeout {
sub.next_timeout(timeout)
} else if let Some(msg) = sub.next() {
Ok(msg)
} else {
Err(ErrorKind::ConnectionReset.into())
};
// Check for no responder status.
if let Ok(msg) = result.as_ref() {
if msg.is_no_responders() {
return Err(Error::new(ErrorKind::NotFound, "no responders"));
}
}
result
}
/// Publish a message on the given subject as a request and allow multiple
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// for msg in nc.request_multi("foo", "Help")?.iter().take(1) {}
/// # Ok(())
/// # }
/// ```
pub fn request_multi(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Subscription> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), None, msg)?;
// Return the subscription.
Ok(sub)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server does not
/// respond with in 10 seconds. Will fail with `NotConnected` if the
/// server is not currently connected. Will fail with `BrokenPipe` if
/// the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush()?;
/// # Ok(())
/// # }
/// ```
pub fn flush(&self) -> io::Result<()> {
self.flush_timeout(DEFAULT_FLUSH_TIMEOUT)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server takes
/// longer than this duration to respond. Will fail with `NotConnected`
/// if the server is not currently connected. Will fail with
/// `BrokenPipe` if the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # use std::time::Duration;
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush_timeout(Duration::from_secs(5))?;
/// # Ok(())
/// # }
/// ```
pub fn flush_timeout(&self, duration: Duration) -> io::Result<()> {
self.0.client.flush(duration)
}
/// Close a NATS connection. All clones of
/// this `Connection` will also be closed,
/// as the backing IO threads are shared.
///
/// If the client is currently connected
/// to a server, the outbound write buffer
/// will be flushed in the process of
/// shutting down.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.close();
/// # Ok(())
/// # }
/// ```
pub fn close(self) {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT).ok();
self.0.client.close();
}
/// Calculates the round trip time between this client and the server,
/// if the server is currently connected. Fails with `TimedOut` if
/// the server takes more than 10 seconds to respond.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("server rtt: {:?}", nc.rtt());
/// # Ok(())
/// # }
/// ```
pub fn rtt(&self) -> io::Result<Duration> {
let start = Instant::now();
self.flush()?;
Ok(start.elapsed())
}
/// Returns true if the version is compatible with the version components.
pub fn is_server_compatible_version(&self, major: i64, minor: i64, patch: i64) -> bool {
let server_info = self.0.client.server_info();
let server_version_captures = VERSION_RE.captures(&server_info.version).unwrap();
let server_major = server_version_captures
.get(1)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_minor = server_version_captures
.get(2)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_patch = server_version_captures
.get(3)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
if server_major < major
|| (server_major == major && server_minor < minor)
|| (server_major == major && server_minor == minor && server_patch < patch)
{
return false;
}
true
}
/// Returns the client IP as known by the server.
/// Supported as of server version 2.1.6.
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_ip());
/// # Ok(())
/// # }
/// ```
pub fn client_ip(&self) -> io::Result<std::net::IpAddr> {
let info = self.0.client.server_info();
match info.client_ip.as_str() {
"" => Err(Error::new(
ErrorKind::Other,
&*format!(
"client_ip was not provided by the server. It is \
supported on servers above version 2.1.6. The server \
version is {}",
info.version
),
)),
ip => match ip.parse() {
Ok(addr) => Ok(addr),
Err(_) => Err(Error::new(
ErrorKind::InvalidData,
&*format!(
"client_ip provided by the server cannot be parsed. \
The server provided IP: {}",
info.client_ip
),
)),
},
}
}
/// Returns the client ID as known by the most recently connected server.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_id());
/// # Ok(())
/// # }
/// ```
pub fn client_id(&self) -> u64 {
self.0.client.server_info().client_id
}
/// Send an unsubscription for all subs then flush the connection, allowing
/// any unprocessed messages to be handled by a handler function if one
/// is configured.
///
/// After the flush returns, we know that a round-trip to the server has
/// happened after it received our unsubscription, so we shut down the
/// subscriber afterwards.
///
/// A similar method exists for the `Subscription` struct which will drain
/// a single `Subscription` without shutting down the entire connection
/// afterward.
///
/// # Example
/// ```no_run
/// # use std::sync::{Arc, atomic::{AtomicBool, Ordering::SeqCst}};
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let received = Arc::new(AtomicBool::new(false));
/// let received_2 = received.clone();
///
/// nc.subscribe("test.drain")?.with_handler(move |m| {
/// received_2.store(true, SeqCst);
/// Ok(())
/// });
///
/// nc.publish("test.drain", "message")?;
/// nc.drain()?;
///
/// # std::thread::sleep(std::time::Duration::from_secs(1));
///
/// assert!(received.load(SeqCst));
///
/// # Ok(())
/// # }
/// ```
pub fn drain(&self) -> io::Result<()> {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT)?;
self.0.client.close();
Ok(())
}
/// Publish a message which may have a reply subject or headers set.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo.headers")?;
/// let headers = [("header1", "value1"), ("header2", "value2")]
/// .iter()
/// .collect();
/// let reply_to = None;
/// nc.publish_with_reply_or_headers("foo.headers", reply_to, Some(&headers), "Hello World!")?;
/// nc.flush()?;
/// let message = sub.next_timeout(std::time::Duration::from_secs(2)).unwrap();
/// assert_eq!(message.headers.unwrap().len(), 2);
/// # Ok(())
/// # }
/// ```
pub fn publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0.client.publish(subject, reply, headers, msg.as_ref())
}
/// Returns the maximum payload size the most recently
/// connected server will accept.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// let nc = nats::connect("demo.nats.io")?;
/// println!("max payload: {:?}", nc.max_payload());
/// # Ok(())
/// # }
pub fn max_payload(&self) -> usize {
self.0.client.server_info.lock().max_payload
}
fn do_subscribe(&self, subject: &str, queue: Option<&str>) -> io::Result<Subscription> {
let (sid, receiver) = self.0.client.subscribe(subject, queue)?;
Ok(Subscription::new(
sid,
subject.to_string(),
receiver,
self.0.client.clone(),
))
}
/// Attempts to publish a message without blocking.
#[doc(hidden)]
pub fn try_publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> Option<io::Result<()>> {
self.0
.client
.try_publish(subject, reply, headers, msg.as_ref())
}
}
| request_with_headers_or_timeout | identifier_name |
lib.rs | // Copyright 2020-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A Rust client for the NATS.io ecosystem.
//!
//! `git clone https://github.com/nats-io/nats.rs`
//!
//! NATS.io is a simple, secure and high performance open source messaging
//! system for cloud native applications, `IoT` messaging, and microservices
//! architectures.
//!
//! For async API refer to the [`asynk`] module.
//!
//! For more information see [https://nats.io/].
//!
//! [https://nats.io/]: https://nats.io/
//!
//! ## Examples
//!
//! `> cargo run --example nats-box -- -h`
//!
//! Basic connections, and those with options. The compiler will force these to
//! be correct.
//!
//! ```no_run
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let nc = nats::connect("demo.nats.io")?;
//!
//! let nc2 = nats::Options::with_user_pass("derek", "s3cr3t!")
//! .with_name("My Rust NATS App")
//! .connect("127.0.0.1")?;
//!
//! let nc3 = nats::Options::with_credentials("path/to/my.creds").connect("connect.ngs.global")?;
//!
//! let nc4 = nats::Options::new()
//! .add_root_certificate("my-certs.pem")
//! .connect("tls://demo.nats.io:4443")?;
//! # Ok(()) }
//! ```
//!
//! ### Publish
//!
//! ```
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! nc.publish("my.subject", "Hello World!")?;
//!
//! nc.publish("my.subject", "my message")?;
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("my.subject", &reply, "Help me!")?;
//! # Ok(()) }
//! ```
//!
//! ### Subscribe
//!
//! ```no_run
//! # fn main() -> std::io::Result<()> {
//! # use std::time::Duration;
//! let nc = nats::connect("demo.nats.io")?;
//! let sub = nc.subscribe("foo")?;
//! for msg in sub.messages() {}
//!
//! // Using next.
//! if let Some(msg) = sub.next() {}
//!
//! // Other iterators.
//! for msg in sub.try_iter() {}
//! for msg in sub.timeout_iter(Duration::from_secs(10)) {}
//!
//! // Using a threaded handler.
//! let sub = nc.subscribe("bar")?.with_handler(move |msg| {
//! println!("Received {}", &msg);
//! Ok(())
//! });
//!
//! // Queue subscription.
//! let qsub = nc.queue_subscribe("foo", "my_group")?;
//! # Ok(()) }
//! ```
//!
//! ### Request/Response
//!
//! ```no_run
//! # use std::time::Duration;
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! let resp = nc.request("foo", "Help me?")?;
//!
//! // With a timeout.
//! let resp = nc.request_timeout("foo", "Help me?", Duration::from_secs(2))?;
//!
//! // With multiple responses.
//! for msg in nc.request_multi("foo", "Help")?.iter() {}
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("foo", &reply, "Help me!")?;
//! let response = rsub.iter().take(1);
//! # Ok(()) }
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(
feature = "fault_injection",
deny(
future_incompatible,
missing_copy_implementations, | unsafe_code,
unused,
unused_qualifications
)
)]
#![cfg_attr(feature = "fault_injection", deny(
// over time, consider enabling the following commented-out lints:
// clippy::else_if_without_else,
// clippy::indexing_slicing,
// clippy::multiple_crate_versions,
// clippy::missing_const_for_fn,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::checked_conversions,
clippy::decimal_literal_representation,
clippy::doc_markdown,
clippy::empty_enum,
clippy::explicit_into_iter_loop,
clippy::explicit_iter_loop,
clippy::expl_impl_clone_on_copy,
clippy::fallible_impl_from,
clippy::filter_map_next,
clippy::float_arithmetic,
clippy::get_unwrap,
clippy::if_not_else,
clippy::inline_always,
clippy::invalid_upcast_comparisons,
clippy::items_after_statements,
clippy::manual_filter_map,
clippy::manual_find_map,
clippy::map_flatten,
clippy::map_unwrap_or,
clippy::match_same_arms,
clippy::maybe_infinite_iter,
clippy::mem_forget,
clippy::needless_borrow,
clippy::needless_continue,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::path_buf_push_overwrite,
clippy::print_stdout,
clippy::single_match_else,
clippy::string_add,
clippy::string_add_assign,
clippy::type_repetition_in_bounds,
clippy::unicode_not_nfc,
clippy::unimplemented,
clippy::unseparated_literal_suffix,
clippy::wildcard_dependencies,
clippy::wildcard_enum_match_arm,
))]
#![allow(
clippy::match_like_matches_macro,
clippy::await_holding_lock,
clippy::shadow_reuse,
clippy::shadow_same,
clippy::shadow_unrelated,
clippy::wildcard_enum_match_arm,
clippy::module_name_repetitions
)]
// As this is a deprecated client, we don't want warnings from new lints to make CI red.
#![allow(clippy::all)]
#![allow(warnings)]
/// Async-enabled NATS client.
pub mod asynk;
mod auth_utils;
mod client;
mod connect;
mod connector;
mod message;
mod options;
mod proto;
mod secure_wipe;
mod subscription;
/// Header constants and types.
pub mod header;
/// `JetStream` stream management and consumers.
pub mod jetstream;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod kv;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod object_store;
#[cfg(feature = "fault_injection")]
mod fault_injection;
#[cfg(feature = "fault_injection")]
use fault_injection::{inject_delay, inject_io_failure};
#[cfg(not(feature = "fault_injection"))]
fn inject_delay() {}
#[cfg(not(feature = "fault_injection"))]
fn inject_io_failure() -> io::Result<()> {
Ok(())
}
// comment out until we reach MSRV 1.54.0
// #[doc = include_str!("../docs/migration-guide-0.17.0.md")]
// #[derive(Copy, Clone)]
// pub struct Migration0170;
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "this has been renamed to `Options`.")]
pub type ConnectionOptions = Options;
#[doc(hidden)]
#[deprecated(since = "0.17.0", note = "this has been moved to `header::HeaderMap`.")]
pub type Headers = HeaderMap;
pub use header::HeaderMap;
use std::{
io::{self, Error, ErrorKind},
sync::Arc,
time::{Duration, Instant},
};
use lazy_static::lazy_static;
use regex::Regex;
pub use connector::{IntoServerList, ServerAddress};
pub use jetstream::JetStreamOptions;
pub use message::Message;
pub use options::Options;
pub use subscription::{Handler, Subscription};
/// A re-export of the `rustls` crate used in this crate,
/// for use in cases where manual client configurations
/// must be provided using `Options::tls_client_config`.
pub use rustls;
#[doc(hidden)]
pub use connect::ConnectInfo;
use client::Client;
use options::AuthStyle;
use secure_wipe::{SecureString, SecureVec};
const VERSION: &str = env!("CARGO_PKG_VERSION");
const LANG: &str = "rust";
const DEFAULT_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
static ref VERSION_RE: Regex = Regex::new(r#"\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?"#).unwrap();
}
/// Information sent by the server back to this client
/// during initial connection, and possibly again later.
#[allow(unused)]
#[derive(Debug, Default, Clone)]
pub struct ServerInfo {
/// The unique identifier of the NATS server.
pub server_id: String,
/// Generated Server Name.
pub server_name: String,
/// The host specified in the cluster parameter/options.
pub host: String,
/// The port number specified in the cluster parameter/options.
pub port: u16,
/// The version of the NATS server.
pub version: String,
/// If this is set, then the server should try to authenticate upon
/// connect.
pub auth_required: bool,
/// If this is set, then the server must authenticate using TLS.
pub tls_required: bool,
/// Maximum payload size that the server will accept.
pub max_payload: usize,
/// The protocol version in use.
pub proto: i8,
/// The server-assigned client ID. This may change during reconnection.
pub client_id: u64,
/// The version of golang the NATS server was built with.
pub go: String,
/// The nonce used for nkeys.
pub nonce: String,
/// A list of server urls that a client can connect to.
pub connect_urls: Vec<String>,
/// The client IP as known by the server.
pub client_ip: String,
/// Whether the server supports headers.
pub headers: bool,
/// Whether server goes into lame duck mode.
pub lame_duck_mode: bool,
}
impl ServerInfo {
fn parse(s: &str) -> Option<ServerInfo> {
let mut obj = json::parse(s).ok()?;
Some(ServerInfo {
server_id: obj["server_id"].take_string()?,
server_name: obj["server_name"].take_string().unwrap_or_default(),
host: obj["host"].take_string()?,
port: obj["port"].as_u16()?,
version: obj["version"].take_string()?,
auth_required: obj["auth_required"].as_bool().unwrap_or(false),
tls_required: obj["tls_required"].as_bool().unwrap_or(false),
max_payload: obj["max_payload"].as_usize()?,
proto: obj["proto"].as_i8()?,
client_id: obj["client_id"].as_u64()?,
go: obj["go"].take_string()?,
nonce: obj["nonce"].take_string().unwrap_or_default(),
connect_urls: obj["connect_urls"]
.members_mut()
.filter_map(|m| m.take_string())
.collect(),
client_ip: obj["client_ip"].take_string().unwrap_or_default(),
headers: obj["headers"].as_bool().unwrap_or(false),
lame_duck_mode: obj["ldm"].as_bool().unwrap_or(false),
})
}
}
/// A NATS connection.
#[derive(Clone, Debug)]
pub struct Connection(pub(crate) Arc<Inner>);
#[derive(Clone, Debug)]
struct Inner {
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
self.client.shutdown();
}
}
/// Connect to one or more NATS servers at the given URLs.
///
/// The [`IntoServerList`] trait allows to pass URLs in various different formats. Furthermore, if
/// you need more control of the connection's parameters use [`Options::connect()`].
///
/// **Warning:** There are asynchronous errors that can happen during operation of NATS client.
/// To handle them, add handler for [`Options::error_callback()`].
///
/// # Examples
///
/// If no scheme is provided the `nats://` scheme is assumed. The default port is `4222`.
/// ```no_run
/// let nc = nats::connect("demo.nats.io")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// It is possible to provide several URLs as a comma separated list.
/// ```no_run
/// let nc = nats::connect("demo.nats.io,tls://demo.nats.io:4443")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Alternatively, an array of strings can be passed.
/// ```no_run
/// # use nats::IntoServerList;
/// let nc = nats::connect(&["demo.nats.io", "tls://demo.nats.io:4443"])?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Instead of using strings, [`ServerAddress`]es can be used directly as well. This is handy for
/// validating user input.
/// ```no_run
/// use nats::ServerAddress;
/// use std::io;
/// use structopt::StructOpt;
///
/// #[derive(Debug, StructOpt)]
/// struct Config {
/// #[structopt(short, long = "server", default_value = "demo.nats.io")]
/// servers: Vec<ServerAddress>,
/// }
///
/// fn main() -> io::Result<()> {
/// let config = Config::from_args();
/// let nc = nats::connect(config.servers)?;
/// Ok(())
/// }
/// ```
pub fn connect<I: IntoServerList>(nats_urls: I) -> io::Result<Connection> {
Options::new().connect(nats_urls)
}
impl Connection {
/// Connects on one or more NATS servers with the given options.
///
/// For more on how to use [`IntoServerList`] trait see [`crate::connect()`].
pub(crate) fn connect_with_options<I>(urls: I, options: Options) -> io::Result<Connection>
where
I: IntoServerList,
{
let urls = urls.into_server_list()?;
let client = Client::connect(urls, options)?;
client.flush(DEFAULT_FLUSH_TIMEOUT)?;
Ok(Connection(Arc::new(Inner { client })))
}
/// Create a subscription for the given NATS connection.
///
/// # Example
/// ```
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo")?;
/// # Ok(())
/// # }
/// ```
pub fn subscribe(&self, subject: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, None)
}
/// Create a queue subscription for the given NATS connection.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.queue_subscribe("foo", "production")?;
/// # Ok(())
/// # }
/// ```
pub fn queue_subscribe(&self, subject: &str, queue: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, Some(queue))
}
/// Publish a message on the given subject.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.publish("foo", "Hello World!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<()> {
self.publish_with_reply_or_headers(subject, None, None, msg)
}
/// Publish a message on the given subject with a reply subject for
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// nc.publish_request("foo", &reply, "Help me!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish_request(
&self,
subject: &str,
reply: &str,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0
.client
.publish(subject, Some(reply), None, msg.as_ref())
}
/// Create a new globally unique inbox which can be used for replies.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// # Ok(())
/// # }
/// ```
pub fn new_inbox(&self) -> String {
format!("_INBOX.{}", nuid::next())
}
/// Publish a message on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request("foo", "Help me?")?;
/// # Ok(())
/// # }
/// ```
pub fn request(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if no
/// response is received.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request_timeout("foo", "Help me?", std::time::Duration::from_secs(2))?;
/// # Ok(())
/// # }
/// ```
pub fn request_timeout(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
timeout: Duration,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, Some(timeout), msg)
}
/// Publish a message with headers on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
headers: &HeaderMap,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, Some(headers), None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if it was set to `Some` if no
/// response is received. It also allows passing headers.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers_or_timeout(
&self,
subject: &str,
maybe_headers: Option<&HeaderMap>,
maybe_timeout: Option<Duration>,
msg: impl AsRef<[u8]>,
) -> io::Result<Message> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), maybe_headers, msg)?;
// Wait for the response
let result = if let Some(timeout) = maybe_timeout {
sub.next_timeout(timeout)
} else if let Some(msg) = sub.next() {
Ok(msg)
} else {
Err(ErrorKind::ConnectionReset.into())
};
// Check for no responder status.
if let Ok(msg) = result.as_ref() {
if msg.is_no_responders() {
return Err(Error::new(ErrorKind::NotFound, "no responders"));
}
}
result
}
/// Publish a message on the given subject as a request and allow multiple
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// for msg in nc.request_multi("foo", "Help")?.iter().take(1) {}
/// # Ok(())
/// # }
/// ```
pub fn request_multi(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Subscription> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), None, msg)?;
// Return the subscription.
Ok(sub)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server does not
/// respond with in 10 seconds. Will fail with `NotConnected` if the
/// server is not currently connected. Will fail with `BrokenPipe` if
/// the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush()?;
/// # Ok(())
/// # }
/// ```
pub fn flush(&self) -> io::Result<()> {
self.flush_timeout(DEFAULT_FLUSH_TIMEOUT)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server takes
/// longer than this duration to respond. Will fail with `NotConnected`
/// if the server is not currently connected. Will fail with
/// `BrokenPipe` if the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # use std::time::Duration;
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush_timeout(Duration::from_secs(5))?;
/// # Ok(())
/// # }
/// ```
pub fn flush_timeout(&self, duration: Duration) -> io::Result<()> {
self.0.client.flush(duration)
}
/// Close a NATS connection. All clones of
/// this `Connection` will also be closed,
/// as the backing IO threads are shared.
///
/// If the client is currently connected
/// to a server, the outbound write buffer
/// will be flushed in the process of
/// shutting down.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.close();
/// # Ok(())
/// # }
/// ```
pub fn close(self) {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT).ok();
self.0.client.close();
}
/// Calculates the round trip time between this client and the server,
/// if the server is currently connected. Fails with `TimedOut` if
/// the server takes more than 10 seconds to respond.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("server rtt: {:?}", nc.rtt());
/// # Ok(())
/// # }
/// ```
pub fn rtt(&self) -> io::Result<Duration> {
let start = Instant::now();
self.flush()?;
Ok(start.elapsed())
}
/// Returns true if the version is compatible with the version components.
pub fn is_server_compatible_version(&self, major: i64, minor: i64, patch: i64) -> bool {
let server_info = self.0.client.server_info();
let server_version_captures = VERSION_RE.captures(&server_info.version).unwrap();
let server_major = server_version_captures
.get(1)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_minor = server_version_captures
.get(2)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_patch = server_version_captures
.get(3)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
if server_major < major
|| (server_major == major && server_minor < minor)
|| (server_major == major && server_minor == minor && server_patch < patch)
{
return false;
}
true
}
/// Returns the client IP as known by the server.
/// Supported as of server version 2.1.6.
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_ip());
/// # Ok(())
/// # }
/// ```
pub fn client_ip(&self) -> io::Result<std::net::IpAddr> {
let info = self.0.client.server_info();
match info.client_ip.as_str() {
"" => Err(Error::new(
ErrorKind::Other,
&*format!(
"client_ip was not provided by the server. It is \
supported on servers above version 2.1.6. The server \
version is {}",
info.version
),
)),
ip => match ip.parse() {
Ok(addr) => Ok(addr),
Err(_) => Err(Error::new(
ErrorKind::InvalidData,
&*format!(
"client_ip provided by the server cannot be parsed. \
The server provided IP: {}",
info.client_ip
),
)),
},
}
}
/// Returns the client ID as known by the most recently connected server.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_id());
/// # Ok(())
/// # }
/// ```
pub fn client_id(&self) -> u64 {
self.0.client.server_info().client_id
}
/// Send an unsubscription for all subs then flush the connection, allowing
/// any unprocessed messages to be handled by a handler function if one
/// is configured.
///
/// After the flush returns, we know that a round-trip to the server has
/// happened after it received our unsubscription, so we shut down the
/// subscriber afterwards.
///
/// A similar method exists for the `Subscription` struct which will drain
/// a single `Subscription` without shutting down the entire connection
/// afterward.
///
/// # Example
/// ```no_run
/// # use std::sync::{Arc, atomic::{AtomicBool, Ordering::SeqCst}};
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let received = Arc::new(AtomicBool::new(false));
/// let received_2 = received.clone();
///
/// nc.subscribe("test.drain")?.with_handler(move |m| {
/// received_2.store(true, SeqCst);
/// Ok(())
/// });
///
/// nc.publish("test.drain", "message")?;
/// nc.drain()?;
///
/// # std::thread::sleep(std::time::Duration::from_secs(1));
///
/// assert!(received.load(SeqCst));
///
/// # Ok(())
/// # }
/// ```
pub fn drain(&self) -> io::Result<()> {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT)?;
self.0.client.close();
Ok(())
}
/// Publish a message which may have a reply subject or headers set.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo.headers")?;
/// let headers = [("header1", "value1"), ("header2", "value2")]
/// .iter()
/// .collect();
/// let reply_to = None;
/// nc.publish_with_reply_or_headers("foo.headers", reply_to, Some(&headers), "Hello World!")?;
/// nc.flush()?;
/// let message = sub.next_timeout(std::time::Duration::from_secs(2)).unwrap();
/// assert_eq!(message.headers.unwrap().len(), 2);
/// # Ok(())
/// # }
/// ```
pub fn publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0.client.publish(subject, reply, headers, msg.as_ref())
}
/// Returns the maximum payload size the most recently
/// connected server will accept.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// let nc = nats::connect("demo.nats.io")?;
/// println!("max payload: {:?}", nc.max_payload());
/// # Ok(())
/// # }
pub fn max_payload(&self) -> usize {
self.0.client.server_info.lock().max_payload
}
fn do_subscribe(&self, subject: &str, queue: Option<&str>) -> io::Result<Subscription> {
let (sid, receiver) = self.0.client.subscribe(subject, queue)?;
Ok(Subscription::new(
sid,
subject.to_string(),
receiver,
self.0.client.clone(),
))
}
/// Attempts to publish a message without blocking.
#[doc(hidden)]
pub fn try_publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> Option<io::Result<()>> {
self.0
.client
.try_publish(subject, reply, headers, msg.as_ref())
}
} | missing_docs,
nonstandard_style,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts, | random_line_split |
lib.rs | // Copyright 2020-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A Rust client for the NATS.io ecosystem.
//!
//! `git clone https://github.com/nats-io/nats.rs`
//!
//! NATS.io is a simple, secure and high performance open source messaging
//! system for cloud native applications, `IoT` messaging, and microservices
//! architectures.
//!
//! For async API refer to the [`asynk`] module.
//!
//! For more information see [https://nats.io/].
//!
//! [https://nats.io/]: https://nats.io/
//!
//! ## Examples
//!
//! `> cargo run --example nats-box -- -h`
//!
//! Basic connections, and those with options. The compiler will force these to
//! be correct.
//!
//! ```no_run
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let nc = nats::connect("demo.nats.io")?;
//!
//! let nc2 = nats::Options::with_user_pass("derek", "s3cr3t!")
//! .with_name("My Rust NATS App")
//! .connect("127.0.0.1")?;
//!
//! let nc3 = nats::Options::with_credentials("path/to/my.creds").connect("connect.ngs.global")?;
//!
//! let nc4 = nats::Options::new()
//! .add_root_certificate("my-certs.pem")
//! .connect("tls://demo.nats.io:4443")?;
//! # Ok(()) }
//! ```
//!
//! ### Publish
//!
//! ```
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! nc.publish("my.subject", "Hello World!")?;
//!
//! nc.publish("my.subject", "my message")?;
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("my.subject", &reply, "Help me!")?;
//! # Ok(()) }
//! ```
//!
//! ### Subscribe
//!
//! ```no_run
//! # fn main() -> std::io::Result<()> {
//! # use std::time::Duration;
//! let nc = nats::connect("demo.nats.io")?;
//! let sub = nc.subscribe("foo")?;
//! for msg in sub.messages() {}
//!
//! // Using next.
//! if let Some(msg) = sub.next() {}
//!
//! // Other iterators.
//! for msg in sub.try_iter() {}
//! for msg in sub.timeout_iter(Duration::from_secs(10)) {}
//!
//! // Using a threaded handler.
//! let sub = nc.subscribe("bar")?.with_handler(move |msg| {
//! println!("Received {}", &msg);
//! Ok(())
//! });
//!
//! // Queue subscription.
//! let qsub = nc.queue_subscribe("foo", "my_group")?;
//! # Ok(()) }
//! ```
//!
//! ### Request/Response
//!
//! ```no_run
//! # use std::time::Duration;
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! let resp = nc.request("foo", "Help me?")?;
//!
//! // With a timeout.
//! let resp = nc.request_timeout("foo", "Help me?", Duration::from_secs(2))?;
//!
//! // With multiple responses.
//! for msg in nc.request_multi("foo", "Help")?.iter() {}
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("foo", &reply, "Help me!")?;
//! let response = rsub.iter().take(1);
//! # Ok(()) }
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(
feature = "fault_injection",
deny(
future_incompatible,
missing_copy_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused,
unused_qualifications
)
)]
#![cfg_attr(feature = "fault_injection", deny(
// over time, consider enabling the following commented-out lints:
// clippy::else_if_without_else,
// clippy::indexing_slicing,
// clippy::multiple_crate_versions,
// clippy::missing_const_for_fn,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::checked_conversions,
clippy::decimal_literal_representation,
clippy::doc_markdown,
clippy::empty_enum,
clippy::explicit_into_iter_loop,
clippy::explicit_iter_loop,
clippy::expl_impl_clone_on_copy,
clippy::fallible_impl_from,
clippy::filter_map_next,
clippy::float_arithmetic,
clippy::get_unwrap,
clippy::if_not_else,
clippy::inline_always,
clippy::invalid_upcast_comparisons,
clippy::items_after_statements,
clippy::manual_filter_map,
clippy::manual_find_map,
clippy::map_flatten,
clippy::map_unwrap_or,
clippy::match_same_arms,
clippy::maybe_infinite_iter,
clippy::mem_forget,
clippy::needless_borrow,
clippy::needless_continue,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::path_buf_push_overwrite,
clippy::print_stdout,
clippy::single_match_else,
clippy::string_add,
clippy::string_add_assign,
clippy::type_repetition_in_bounds,
clippy::unicode_not_nfc,
clippy::unimplemented,
clippy::unseparated_literal_suffix,
clippy::wildcard_dependencies,
clippy::wildcard_enum_match_arm,
))]
#![allow(
clippy::match_like_matches_macro,
clippy::await_holding_lock,
clippy::shadow_reuse,
clippy::shadow_same,
clippy::shadow_unrelated,
clippy::wildcard_enum_match_arm,
clippy::module_name_repetitions
)]
// As this is a deprecated client, we don't want warnings from new lints to make CI red.
#![allow(clippy::all)]
#![allow(warnings)]
/// Async-enabled NATS client.
pub mod asynk;
mod auth_utils;
mod client;
mod connect;
mod connector;
mod message;
mod options;
mod proto;
mod secure_wipe;
mod subscription;
/// Header constants and types.
pub mod header;
/// `JetStream` stream management and consumers.
pub mod jetstream;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod kv;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod object_store;
#[cfg(feature = "fault_injection")]
mod fault_injection;
#[cfg(feature = "fault_injection")]
use fault_injection::{inject_delay, inject_io_failure};
#[cfg(not(feature = "fault_injection"))]
fn inject_delay() {}
#[cfg(not(feature = "fault_injection"))]
fn inject_io_failure() -> io::Result<()> {
Ok(())
}
// comment out until we reach MSRV 1.54.0
// #[doc = include_str!("../docs/migration-guide-0.17.0.md")]
// #[derive(Copy, Clone)]
// pub struct Migration0170;
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "this has been renamed to `Options`.")]
pub type ConnectionOptions = Options;
#[doc(hidden)]
#[deprecated(since = "0.17.0", note = "this has been moved to `header::HeaderMap`.")]
pub type Headers = HeaderMap;
pub use header::HeaderMap;
use std::{
io::{self, Error, ErrorKind},
sync::Arc,
time::{Duration, Instant},
};
use lazy_static::lazy_static;
use regex::Regex;
pub use connector::{IntoServerList, ServerAddress};
pub use jetstream::JetStreamOptions;
pub use message::Message;
pub use options::Options;
pub use subscription::{Handler, Subscription};
/// A re-export of the `rustls` crate used in this crate,
/// for use in cases where manual client configurations
/// must be provided using `Options::tls_client_config`.
pub use rustls;
#[doc(hidden)]
pub use connect::ConnectInfo;
use client::Client;
use options::AuthStyle;
use secure_wipe::{SecureString, SecureVec};
const VERSION: &str = env!("CARGO_PKG_VERSION");
const LANG: &str = "rust";
const DEFAULT_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
static ref VERSION_RE: Regex = Regex::new(r#"\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?"#).unwrap();
}
/// Information sent by the server back to this client
/// during initial connection, and possibly again later.
#[allow(unused)]
#[derive(Debug, Default, Clone)]
pub struct ServerInfo {
/// The unique identifier of the NATS server.
pub server_id: String,
/// Generated Server Name.
pub server_name: String,
/// The host specified in the cluster parameter/options.
pub host: String,
/// The port number specified in the cluster parameter/options.
pub port: u16,
/// The version of the NATS server.
pub version: String,
/// If this is set, then the server should try to authenticate upon
/// connect.
pub auth_required: bool,
/// If this is set, then the server must authenticate using TLS.
pub tls_required: bool,
/// Maximum payload size that the server will accept.
pub max_payload: usize,
/// The protocol version in use.
pub proto: i8,
/// The server-assigned client ID. This may change during reconnection.
pub client_id: u64,
/// The version of golang the NATS server was built with.
pub go: String,
/// The nonce used for nkeys.
pub nonce: String,
/// A list of server urls that a client can connect to.
pub connect_urls: Vec<String>,
/// The client IP as known by the server.
pub client_ip: String,
/// Whether the server supports headers.
pub headers: bool,
/// Whether server goes into lame duck mode.
pub lame_duck_mode: bool,
}
impl ServerInfo {
fn parse(s: &str) -> Option<ServerInfo> {
let mut obj = json::parse(s).ok()?;
Some(ServerInfo {
server_id: obj["server_id"].take_string()?,
server_name: obj["server_name"].take_string().unwrap_or_default(),
host: obj["host"].take_string()?,
port: obj["port"].as_u16()?,
version: obj["version"].take_string()?,
auth_required: obj["auth_required"].as_bool().unwrap_or(false),
tls_required: obj["tls_required"].as_bool().unwrap_or(false),
max_payload: obj["max_payload"].as_usize()?,
proto: obj["proto"].as_i8()?,
client_id: obj["client_id"].as_u64()?,
go: obj["go"].take_string()?,
nonce: obj["nonce"].take_string().unwrap_or_default(),
connect_urls: obj["connect_urls"]
.members_mut()
.filter_map(|m| m.take_string())
.collect(),
client_ip: obj["client_ip"].take_string().unwrap_or_default(),
headers: obj["headers"].as_bool().unwrap_or(false),
lame_duck_mode: obj["ldm"].as_bool().unwrap_or(false),
})
}
}
/// A NATS connection.
#[derive(Clone, Debug)]
pub struct Connection(pub(crate) Arc<Inner>);
#[derive(Clone, Debug)]
struct Inner {
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
self.client.shutdown();
}
}
/// Connect to one or more NATS servers at the given URLs.
///
/// The [`IntoServerList`] trait allows to pass URLs in various different formats. Furthermore, if
/// you need more control of the connection's parameters use [`Options::connect()`].
///
/// **Warning:** There are asynchronous errors that can happen during operation of NATS client.
/// To handle them, add handler for [`Options::error_callback()`].
///
/// # Examples
///
/// If no scheme is provided the `nats://` scheme is assumed. The default port is `4222`.
/// ```no_run
/// let nc = nats::connect("demo.nats.io")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// It is possible to provide several URLs as a comma separated list.
/// ```no_run
/// let nc = nats::connect("demo.nats.io,tls://demo.nats.io:4443")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Alternatively, an array of strings can be passed.
/// ```no_run
/// # use nats::IntoServerList;
/// let nc = nats::connect(&["demo.nats.io", "tls://demo.nats.io:4443"])?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Instead of using strings, [`ServerAddress`]es can be used directly as well. This is handy for
/// validating user input.
/// ```no_run
/// use nats::ServerAddress;
/// use std::io;
/// use structopt::StructOpt;
///
/// #[derive(Debug, StructOpt)]
/// struct Config {
/// #[structopt(short, long = "server", default_value = "demo.nats.io")]
/// servers: Vec<ServerAddress>,
/// }
///
/// fn main() -> io::Result<()> {
/// let config = Config::from_args();
/// let nc = nats::connect(config.servers)?;
/// Ok(())
/// }
/// ```
pub fn connect<I: IntoServerList>(nats_urls: I) -> io::Result<Connection> {
Options::new().connect(nats_urls)
}
impl Connection {
/// Connects on one or more NATS servers with the given options.
///
/// For more on how to use [`IntoServerList`] trait see [`crate::connect()`].
pub(crate) fn connect_with_options<I>(urls: I, options: Options) -> io::Result<Connection>
where
I: IntoServerList,
{
let urls = urls.into_server_list()?;
let client = Client::connect(urls, options)?;
client.flush(DEFAULT_FLUSH_TIMEOUT)?;
Ok(Connection(Arc::new(Inner { client })))
}
/// Create a subscription for the given NATS connection.
///
/// # Example
/// ```
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo")?;
/// # Ok(())
/// # }
/// ```
pub fn subscribe(&self, subject: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, None)
}
/// Create a queue subscription for the given NATS connection.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.queue_subscribe("foo", "production")?;
/// # Ok(())
/// # }
/// ```
pub fn queue_subscribe(&self, subject: &str, queue: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, Some(queue))
}
/// Publish a message on the given subject.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.publish("foo", "Hello World!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<()> {
self.publish_with_reply_or_headers(subject, None, None, msg)
}
/// Publish a message on the given subject with a reply subject for
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// nc.publish_request("foo", &reply, "Help me!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish_request(
&self,
subject: &str,
reply: &str,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0
.client
.publish(subject, Some(reply), None, msg.as_ref())
}
/// Create a new globally unique inbox which can be used for replies.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// # Ok(())
/// # }
/// ```
pub fn new_inbox(&self) -> String {
format!("_INBOX.{}", nuid::next())
}
/// Publish a message on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request("foo", "Help me?")?;
/// # Ok(())
/// # }
/// ```
pub fn request(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if no
/// response is received.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request_timeout("foo", "Help me?", std::time::Duration::from_secs(2))?;
/// # Ok(())
/// # }
/// ```
pub fn request_timeout(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
timeout: Duration,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, Some(timeout), msg)
}
/// Publish a message with headers on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
headers: &HeaderMap,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, Some(headers), None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if it was set to `Some` if no
/// response is received. It also allows passing headers.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers_or_timeout(
&self,
subject: &str,
maybe_headers: Option<&HeaderMap>,
maybe_timeout: Option<Duration>,
msg: impl AsRef<[u8]>,
) -> io::Result<Message> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), maybe_headers, msg)?;
// Wait for the response
let result = if let Some(timeout) = maybe_timeout {
sub.next_timeout(timeout)
} else if let Some(msg) = sub.next() {
Ok(msg)
} else {
Err(ErrorKind::ConnectionReset.into())
};
// Check for no responder status.
if let Ok(msg) = result.as_ref() {
if msg.is_no_responders() {
return Err(Error::new(ErrorKind::NotFound, "no responders"));
}
}
result
}
/// Publish a message on the given subject as a request and allow multiple
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// for msg in nc.request_multi("foo", "Help")?.iter().take(1) {}
/// # Ok(())
/// # }
/// ```
pub fn request_multi(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Subscription> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), None, msg)?;
// Return the subscription.
Ok(sub)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server does not
/// respond with in 10 seconds. Will fail with `NotConnected` if the
/// server is not currently connected. Will fail with `BrokenPipe` if
/// the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush()?;
/// # Ok(())
/// # }
/// ```
pub fn flush(&self) -> io::Result<()> {
self.flush_timeout(DEFAULT_FLUSH_TIMEOUT)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server takes
/// longer than this duration to respond. Will fail with `NotConnected`
/// if the server is not currently connected. Will fail with
/// `BrokenPipe` if the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # use std::time::Duration;
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush_timeout(Duration::from_secs(5))?;
/// # Ok(())
/// # }
/// ```
pub fn flush_timeout(&self, duration: Duration) -> io::Result<()> {
self.0.client.flush(duration)
}
/// Close a NATS connection. All clones of
/// this `Connection` will also be closed,
/// as the backing IO threads are shared.
///
/// If the client is currently connected
/// to a server, the outbound write buffer
/// will be flushed in the process of
/// shutting down.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.close();
/// # Ok(())
/// # }
/// ```
pub fn close(self) {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT).ok();
self.0.client.close();
}
/// Calculates the round trip time between this client and the server,
/// if the server is currently connected. Fails with `TimedOut` if
/// the server takes more than 10 seconds to respond.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("server rtt: {:?}", nc.rtt());
/// # Ok(())
/// # }
/// ```
pub fn rtt(&self) -> io::Result<Duration> {
let start = Instant::now();
self.flush()?;
Ok(start.elapsed())
}
/// Returns true if the version is compatible with the version components.
pub fn is_server_compatible_version(&self, major: i64, minor: i64, patch: i64) -> bool {
let server_info = self.0.client.server_info();
let server_version_captures = VERSION_RE.captures(&server_info.version).unwrap();
let server_major = server_version_captures
.get(1)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_minor = server_version_captures
.get(2)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_patch = server_version_captures
.get(3)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
if server_major < major
|| (server_major == major && server_minor < minor)
|| (server_major == major && server_minor == minor && server_patch < patch)
{
return false;
}
true
}
/// Returns the client IP as known by the server.
/// Supported as of server version 2.1.6.
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_ip());
/// # Ok(())
/// # }
/// ```
pub fn client_ip(&self) -> io::Result<std::net::IpAddr> {
let info = self.0.client.server_info();
match info.client_ip.as_str() {
"" => Err(Error::new(
ErrorKind::Other,
&*format!(
"client_ip was not provided by the server. It is \
supported on servers above version 2.1.6. The server \
version is {}",
info.version
),
)),
ip => match ip.parse() {
Ok(addr) => Ok(addr),
Err(_) => Err(Error::new(
ErrorKind::InvalidData,
&*format!(
"client_ip provided by the server cannot be parsed. \
The server provided IP: {}",
info.client_ip
),
)),
},
}
}
/// Returns the client ID as known by the most recently connected server.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_id());
/// # Ok(())
/// # }
/// ```
pub fn client_id(&self) -> u64 {
self.0.client.server_info().client_id
}
/// Send an unsubscription for all subs then flush the connection, allowing
/// any unprocessed messages to be handled by a handler function if one
/// is configured.
///
/// After the flush returns, we know that a round-trip to the server has
/// happened after it received our unsubscription, so we shut down the
/// subscriber afterwards.
///
/// A similar method exists for the `Subscription` struct which will drain
/// a single `Subscription` without shutting down the entire connection
/// afterward.
///
/// # Example
/// ```no_run
/// # use std::sync::{Arc, atomic::{AtomicBool, Ordering::SeqCst}};
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let received = Arc::new(AtomicBool::new(false));
/// let received_2 = received.clone();
///
/// nc.subscribe("test.drain")?.with_handler(move |m| {
/// received_2.store(true, SeqCst);
/// Ok(())
/// });
///
/// nc.publish("test.drain", "message")?;
/// nc.drain()?;
///
/// # std::thread::sleep(std::time::Duration::from_secs(1));
///
/// assert!(received.load(SeqCst));
///
/// # Ok(())
/// # }
/// ```
pub fn drain(&self) -> io::Result<()> {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT)?;
self.0.client.close();
Ok(())
}
/// Publish a message which may have a reply subject or headers set.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo.headers")?;
/// let headers = [("header1", "value1"), ("header2", "value2")]
/// .iter()
/// .collect();
/// let reply_to = None;
/// nc.publish_with_reply_or_headers("foo.headers", reply_to, Some(&headers), "Hello World!")?;
/// nc.flush()?;
/// let message = sub.next_timeout(std::time::Duration::from_secs(2)).unwrap();
/// assert_eq!(message.headers.unwrap().len(), 2);
/// # Ok(())
/// # }
/// ```
pub fn publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> io::Result<()> |
/// Returns the maximum payload size the most recently
/// connected server will accept.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// let nc = nats::connect("demo.nats.io")?;
/// println!("max payload: {:?}", nc.max_payload());
/// # Ok(())
/// # }
pub fn max_payload(&self) -> usize {
self.0.client.server_info.lock().max_payload
}
fn do_subscribe(&self, subject: &str, queue: Option<&str>) -> io::Result<Subscription> {
let (sid, receiver) = self.0.client.subscribe(subject, queue)?;
Ok(Subscription::new(
sid,
subject.to_string(),
receiver,
self.0.client.clone(),
))
}
/// Attempts to publish a message without blocking.
#[doc(hidden)]
pub fn try_publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> Option<io::Result<()>> {
self.0
.client
.try_publish(subject, reply, headers, msg.as_ref())
}
}
| {
self.0.client.publish(subject, reply, headers, msg.as_ref())
} | identifier_body |
prebuilt.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cc
import (
"android/soong/android"
"path/filepath"
"strings"
)
func init() {
RegisterPrebuiltBuildComponents(android.InitRegistrationContext)
}
func RegisterPrebuiltBuildComponents(ctx android.RegistrationContext) {
ctx.RegisterModuleType("cc_prebuilt_library", PrebuiltLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_library_shared", PrebuiltSharedLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_library_static", PrebuiltStaticLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_test_library_shared", PrebuiltSharedTestLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_object", prebuiltObjectFactory)
ctx.RegisterModuleType("cc_prebuilt_binary", prebuiltBinaryFactory)
}
type prebuiltLinkerInterface interface {
Name(string) string
prebuilt() *android.Prebuilt
}
type prebuiltLinkerProperties struct {
// a prebuilt library or binary. Can reference a genrule module that generates an executable file.
Srcs []string `android:"path,arch_variant"`
Sanitized Sanitized `android:"arch_variant"`
// Check the prebuilt ELF files (e.g. DT_SONAME, DT_NEEDED, resolution of undefined
// symbols, etc), default true.
Check_elf_files *bool
// Optionally provide an import library if this is a Windows PE DLL prebuilt.
// This is needed only if this library is linked by other modules in build time.
// Only makes sense for the Windows target.
Windows_import_lib *string `android:"path,arch_variant"`
}
type prebuiltLinker struct {
android.Prebuilt
properties prebuiltLinkerProperties
}
func (p *prebuiltLinker) prebuilt() *android.Prebuilt {
return &p.Prebuilt
}
func (p *prebuiltLinker) PrebuiltSrcs() []string {
return p.properties.Srcs
}
type prebuiltLibraryInterface interface {
libraryInterface
prebuiltLinkerInterface
disablePrebuilt()
}
type prebuiltLibraryLinker struct {
*libraryDecorator
prebuiltLinker
}
var _ prebuiltLinkerInterface = (*prebuiltLibraryLinker)(nil)
var _ prebuiltLibraryInterface = (*prebuiltLibraryLinker)(nil)
func (p *prebuiltLibraryLinker) linkerInit(ctx BaseModuleContext) {}
func (p *prebuiltLibraryLinker) linkerDeps(ctx DepsContext, deps Deps) Deps {
return p.libraryDecorator.linkerDeps(ctx, deps)
}
func (p *prebuiltLibraryLinker) linkerFlags(ctx ModuleContext, flags Flags) Flags {
return flags
}
func (p *prebuiltLibraryLinker) linkerProps() []interface{} {
return p.libraryDecorator.linkerProps()
}
func (p *prebuiltLibraryLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
p.libraryDecorator.flagExporter.exportIncludes(ctx)
p.libraryDecorator.flagExporter.reexportDirs(deps.ReexportedDirs...)
p.libraryDecorator.flagExporter.reexportSystemDirs(deps.ReexportedSystemDirs...)
p.libraryDecorator.flagExporter.reexportFlags(deps.ReexportedFlags...)
p.libraryDecorator.flagExporter.reexportDeps(deps.ReexportedDeps...)
p.libraryDecorator.flagExporter.addExportedGeneratedHeaders(deps.ReexportedGeneratedHeaders...)
p.libraryDecorator.flagExporter.setProvider(ctx)
// TODO(ccross): verify shared library dependencies
srcs := p.prebuiltSrcs(ctx)
if len(srcs) > 0 {
builderFlags := flagsToBuilderFlags(flags)
if len(srcs) > 1 {
ctx.PropertyErrorf("srcs", "multiple prebuilt source files")
return nil
}
p.libraryDecorator.exportVersioningMacroIfNeeded(ctx)
in := android.PathForModuleSrc(ctx, srcs[0])
if p.static() {
depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(in).Build()
ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
StaticLibrary: in,
TransitiveStaticLibrariesForOrdering: depSet,
})
return in
}
if p.shared() {
p.unstrippedOutputFile = in
libName := p.libraryDecorator.getLibName(ctx) + flags.Toolchain.ShlibSuffix()
outputFile := android.PathForModuleOut(ctx, libName)
var implicits android.Paths
if p.stripper.NeedsStrip(ctx) {
stripFlags := flagsToStripFlags(flags)
stripped := android.PathForModuleOut(ctx, "stripped", libName)
p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, stripFlags)
in = stripped
}
// Optimize out relinking against shared libraries whose interface hasn't changed by
// depending on a table of contents file instead of the library itself.
tocFile := android.PathForModuleOut(ctx, libName+".toc")
p.tocFile = android.OptionalPathForPath(tocFile)
transformSharedObjectToToc(ctx, outputFile, tocFile, builderFlags)
if ctx.Windows() && p.properties.Windows_import_lib != nil {
// Consumers of this library actually links to the import library in build
// time and dynamically links to the DLL in run time. i.e.
// a.exe <-- static link --> foo.lib <-- dynamic link --> foo.dll
importLibSrc := android.PathForModuleSrc(ctx, String(p.properties.Windows_import_lib))
importLibName := p.libraryDecorator.getLibName(ctx) + ".lib"
importLibOutputFile := android.PathForModuleOut(ctx, importLibName)
implicits = append(implicits, importLibOutputFile)
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt import library",
Input: importLibSrc,
Output: importLibOutputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
}
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt shared library",
Implicits: implicits,
Input: in,
Output: outputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
ctx.SetProvider(SharedLibraryInfoProvider, SharedLibraryInfo{
SharedLibrary: outputFile,
UnstrippedSharedLibrary: p.unstrippedOutputFile,
Target: ctx.Target(),
TableOfContents: p.tocFile,
})
return outputFile
}
}
if p.header() {
ctx.SetProvider(HeaderLibraryInfoProvider, HeaderLibraryInfo{})
return nil
}
return nil
}
func (p *prebuiltLibraryLinker) prebuiltSrcs(ctx android.BaseModuleContext) []string {
sanitize := ctx.Module().(*Module).sanitize
srcs := p.properties.Srcs
srcs = append(srcs, srcsForSanitizer(sanitize, p.properties.Sanitized)...)
if p.static() {
srcs = append(srcs, p.libraryDecorator.StaticProperties.Static.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.StaticProperties.Static.Sanitized)...)
}
if p.shared() {
srcs = append(srcs, p.libraryDecorator.SharedProperties.Shared.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.SharedProperties.Shared.Sanitized)...)
}
return srcs
}
func (p *prebuiltLibraryLinker) shared() bool {
return p.libraryDecorator.shared()
}
func (p *prebuiltLibraryLinker) nativeCoverage() bool {
return false
}
func (p *prebuiltLibraryLinker) disablePrebuilt() {
p.properties.Srcs = nil
}
// Implements versionedInterface
func (p *prebuiltLibraryLinker) | (name string) string {
return strings.TrimPrefix(name, "prebuilt_")
}
func NewPrebuiltLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewLibrary(hod)
module.compiler = nil
prebuilt := &prebuiltLibraryLinker{
libraryDecorator: library,
}
module.linker = prebuilt
module.library = prebuilt
module.AddProperties(&prebuilt.properties)
srcsSupplier := func(ctx android.BaseModuleContext, _ android.Module) []string {
return prebuilt.prebuiltSrcs(ctx)
}
android.InitPrebuiltModuleWithSrcSupplier(module, srcsSupplier, "srcs")
// Prebuilt libraries can be used in SDKs.
android.InitSdkAwareModule(module)
return module, library
}
// cc_prebuilt_library installs a precompiled shared library that are
// listed in the srcs property in the device's directory.
func PrebuiltLibraryFactory() android.Module {
module, _ := NewPrebuiltLibrary(android.HostAndDeviceSupported)
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module.Init()
}
// cc_prebuilt_library_shared installs a precompiled shared library that are
// listed in the srcs property in the device's directory.
func PrebuiltSharedLibraryFactory() android.Module {
module, _ := NewPrebuiltSharedLibrary(android.HostAndDeviceSupported)
return module.Init()
}
// cc_prebuilt_test_library_shared installs a precompiled shared library
// to be used as a data dependency of a test-related module (such as cc_test, or
// cc_test_library).
func PrebuiltSharedTestLibraryFactory() android.Module {
module, library := NewPrebuiltLibrary(android.HostAndDeviceSupported)
library.BuildOnlyShared()
library.baseInstaller = NewTestInstaller()
return module.Init()
}
func NewPrebuiltSharedLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyShared()
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module, library
}
// cc_prebuilt_library_static installs a precompiled static library that are
// listed in the srcs property in the device's directory.
func PrebuiltStaticLibraryFactory() android.Module {
module, _ := NewPrebuiltStaticLibrary(android.HostAndDeviceSupported)
return module.Init()
}
func NewPrebuiltStaticLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyStatic()
module.bazelHandler = &prebuiltStaticLibraryBazelHandler{module: module, library: library}
return module, library
}
type prebuiltObjectProperties struct {
Srcs []string `android:"path,arch_variant"`
}
type prebuiltObjectLinker struct {
android.Prebuilt
objectLinker
properties prebuiltObjectProperties
}
type prebuiltStaticLibraryBazelHandler struct {
bazelHandler
module *Module
library *libraryDecorator
}
func (h *prebuiltStaticLibraryBazelHandler) generateBazelBuildActions(ctx android.ModuleContext, label string) bool {
bazelCtx := ctx.Config().BazelContext
ccInfo, ok, err := bazelCtx.GetCcInfo(label, ctx.Arch().ArchType)
if err != nil {
ctx.ModuleErrorf("Error getting Bazel CcInfo: %s", err)
}
if !ok {
return false
}
staticLibs := ccInfo.CcStaticLibraryFiles
if len(staticLibs) > 1 {
ctx.ModuleErrorf("expected 1 static library from bazel target %q, got %s", label, staticLibs)
return false
}
// TODO(b/184543518): cc_prebuilt_library_static may have properties for re-exporting flags
// TODO(eakammer):Add stub-related flags if this library is a stub library.
// h.library.exportVersioningMacroIfNeeded(ctx)
// Dependencies on this library will expect collectedSnapshotHeaders to be set, otherwise
// validation will fail. For now, set this to an empty list.
// TODO(cparsons): More closely mirror the collectHeadersForSnapshot implementation.
h.library.collectedSnapshotHeaders = android.Paths{}
if len(staticLibs) == 0 {
h.module.outputFile = android.OptionalPath{}
return true
}
out := android.PathForBazelOut(ctx, staticLibs[0])
h.module.outputFile = android.OptionalPathForPath(out)
depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(out).Build()
ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
StaticLibrary: out,
TransitiveStaticLibrariesForOrdering: depSet,
})
return true
}
func (p *prebuiltObjectLinker) prebuilt() *android.Prebuilt {
return &p.Prebuilt
}
var _ prebuiltLinkerInterface = (*prebuiltObjectLinker)(nil)
func (p *prebuiltObjectLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
if len(p.properties.Srcs) > 0 {
return p.Prebuilt.SingleSourcePath(ctx)
}
return nil
}
func (p *prebuiltObjectLinker) object() bool {
return true
}
func newPrebuiltObject() *Module {
module := newObject()
prebuilt := &prebuiltObjectLinker{
objectLinker: objectLinker{
baseLinker: NewBaseLinker(nil),
},
}
module.linker = prebuilt
module.AddProperties(&prebuilt.properties)
android.InitPrebuiltModule(module, &prebuilt.properties.Srcs)
android.InitSdkAwareModule(module)
return module
}
func prebuiltObjectFactory() android.Module {
module := newPrebuiltObject()
return module.Init()
}
type prebuiltBinaryLinker struct {
*binaryDecorator
prebuiltLinker
toolPath android.OptionalPath
}
var _ prebuiltLinkerInterface = (*prebuiltBinaryLinker)(nil)
func (p *prebuiltBinaryLinker) hostToolPath() android.OptionalPath {
return p.toolPath
}
func (p *prebuiltBinaryLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
// TODO(ccross): verify shared library dependencies
if len(p.properties.Srcs) > 0 {
fileName := p.getStem(ctx) + flags.Toolchain.ExecutableSuffix()
in := p.Prebuilt.SingleSourcePath(ctx)
outputFile := android.PathForModuleOut(ctx, fileName)
p.unstrippedOutputFile = in
if ctx.Host() {
// Host binaries are symlinked to their prebuilt source locations. That
// way they are executed directly from there so the linker resolves their
// shared library dependencies relative to that location (using
// $ORIGIN/../lib(64):$ORIGIN/lib(64) as RUNPATH). This way the prebuilt
// repository can supply the expected versions of the shared libraries
// without interference from what is in the out tree.
// These shared lib paths may point to copies of the libs in
// .intermediates, which isn't where the binary will load them from, but
// it's fine for dependency tracking. If a library dependency is updated,
// the symlink will get a new timestamp, along with any installed symlinks
// handled in make.
sharedLibPaths := deps.EarlySharedLibs
sharedLibPaths = append(sharedLibPaths, deps.SharedLibs...)
sharedLibPaths = append(sharedLibPaths, deps.LateSharedLibs...)
var fromPath = in.String()
if !filepath.IsAbs(fromPath) {
fromPath = "$$PWD/" + fromPath
}
ctx.Build(pctx, android.BuildParams{
Rule: android.Symlink,
Output: outputFile,
Input: in,
Implicits: sharedLibPaths,
Args: map[string]string{
"fromPath": fromPath,
},
})
p.toolPath = android.OptionalPathForPath(outputFile)
} else {
if p.stripper.NeedsStrip(ctx) {
stripped := android.PathForModuleOut(ctx, "stripped", fileName)
p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, flagsToStripFlags(flags))
in = stripped
}
// Copy binaries to a name matching the final installed name
ctx.Build(pctx, android.BuildParams{
Rule: android.CpExecutable,
Description: "prebuilt",
Output: outputFile,
Input: in,
})
}
return outputFile
}
return nil
}
func (p *prebuiltBinaryLinker) binary() bool {
return true
}
// cc_prebuilt_binary installs a precompiled executable in srcs property in the
// device's directory.
func prebuiltBinaryFactory() android.Module {
module, _ := NewPrebuiltBinary(android.HostAndDeviceSupported)
return module.Init()
}
func NewPrebuiltBinary(hod android.HostOrDeviceSupported) (*Module, *binaryDecorator) {
module, binary := NewBinary(hod)
module.compiler = nil
prebuilt := &prebuiltBinaryLinker{
binaryDecorator: binary,
}
module.linker = prebuilt
module.installer = prebuilt
module.AddProperties(&prebuilt.properties)
android.InitPrebuiltModule(module, &prebuilt.properties.Srcs)
return module, binary
}
// Sanitized holds per-sanitizer source lists so a prebuilt module can supply
// different artifacts for plain, ASan, and HWASan builds (selected by
// srcsForSanitizer).
type Sanitized struct {
    None struct {
        Srcs []string `android:"path,arch_variant"`
    } `android:"arch_variant"`
    Address struct {
        Srcs []string `android:"path,arch_variant"`
    } `android:"arch_variant"`
    Hwaddress struct {
        Srcs []string `android:"path,arch_variant"`
    } `android:"arch_variant"`
}
// srcsForSanitizer returns the source list from sanitized that matches the
// sanitizer enabled on the module, falling back to the None variant.
// A nil sanitize yields nil.
func srcsForSanitizer(sanitize *sanitize, sanitized Sanitized) []string {
    if sanitize == nil {
        return nil
    }
    if Bool(sanitize.Properties.Sanitize.Address) && sanitized.Address.Srcs != nil {
        return sanitized.Address.Srcs
    }
    if Bool(sanitize.Properties.Sanitize.Hwaddress) && sanitized.Hwaddress.Srcs != nil {
        return sanitized.Hwaddress.Srcs
    }
    return sanitized.None.Srcs
}
| implementationModuleName | identifier_name |
prebuilt.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cc
import (
"android/soong/android"
"path/filepath"
"strings"
)
func init() {
RegisterPrebuiltBuildComponents(android.InitRegistrationContext)
}
func RegisterPrebuiltBuildComponents(ctx android.RegistrationContext) {
ctx.RegisterModuleType("cc_prebuilt_library", PrebuiltLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_library_shared", PrebuiltSharedLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_library_static", PrebuiltStaticLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_test_library_shared", PrebuiltSharedTestLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_object", prebuiltObjectFactory)
ctx.RegisterModuleType("cc_prebuilt_binary", prebuiltBinaryFactory)
}
type prebuiltLinkerInterface interface {
Name(string) string
prebuilt() *android.Prebuilt
}
type prebuiltLinkerProperties struct {
// a prebuilt library or binary. Can reference a genrule module that generates an executable file.
Srcs []string `android:"path,arch_variant"`
Sanitized Sanitized `android:"arch_variant"`
// Check the prebuilt ELF files (e.g. DT_SONAME, DT_NEEDED, resolution of undefined
// symbols, etc), default true.
Check_elf_files *bool
// Optionally provide an import library if this is a Windows PE DLL prebuilt.
// This is needed only if this library is linked by other modules in build time.
// Only makes sense for the Windows target.
Windows_import_lib *string `android:"path,arch_variant"`
}
type prebuiltLinker struct {
android.Prebuilt
properties prebuiltLinkerProperties
}
func (p *prebuiltLinker) prebuilt() *android.Prebuilt {
return &p.Prebuilt
}
func (p *prebuiltLinker) PrebuiltSrcs() []string {
return p.properties.Srcs
}
type prebuiltLibraryInterface interface {
libraryInterface
prebuiltLinkerInterface
disablePrebuilt()
}
type prebuiltLibraryLinker struct {
*libraryDecorator
prebuiltLinker
}
var _ prebuiltLinkerInterface = (*prebuiltLibraryLinker)(nil)
var _ prebuiltLibraryInterface = (*prebuiltLibraryLinker)(nil)
func (p *prebuiltLibraryLinker) linkerInit(ctx BaseModuleContext) {}
func (p *prebuiltLibraryLinker) linkerDeps(ctx DepsContext, deps Deps) Deps {
return p.libraryDecorator.linkerDeps(ctx, deps)
}
func (p *prebuiltLibraryLinker) linkerFlags(ctx ModuleContext, flags Flags) Flags {
return flags
}
func (p *prebuiltLibraryLinker) linkerProps() []interface{} {
return p.libraryDecorator.linkerProps()
}
func (p *prebuiltLibraryLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path { | p.libraryDecorator.flagExporter.reexportDeps(deps.ReexportedDeps...)
p.libraryDecorator.flagExporter.addExportedGeneratedHeaders(deps.ReexportedGeneratedHeaders...)
p.libraryDecorator.flagExporter.setProvider(ctx)
// TODO(ccross): verify shared library dependencies
srcs := p.prebuiltSrcs(ctx)
if len(srcs) > 0 {
builderFlags := flagsToBuilderFlags(flags)
if len(srcs) > 1 {
ctx.PropertyErrorf("srcs", "multiple prebuilt source files")
return nil
}
p.libraryDecorator.exportVersioningMacroIfNeeded(ctx)
in := android.PathForModuleSrc(ctx, srcs[0])
if p.static() {
depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(in).Build()
ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
StaticLibrary: in,
TransitiveStaticLibrariesForOrdering: depSet,
})
return in
}
if p.shared() {
p.unstrippedOutputFile = in
libName := p.libraryDecorator.getLibName(ctx) + flags.Toolchain.ShlibSuffix()
outputFile := android.PathForModuleOut(ctx, libName)
var implicits android.Paths
if p.stripper.NeedsStrip(ctx) {
stripFlags := flagsToStripFlags(flags)
stripped := android.PathForModuleOut(ctx, "stripped", libName)
p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, stripFlags)
in = stripped
}
// Optimize out relinking against shared libraries whose interface hasn't changed by
// depending on a table of contents file instead of the library itself.
tocFile := android.PathForModuleOut(ctx, libName+".toc")
p.tocFile = android.OptionalPathForPath(tocFile)
transformSharedObjectToToc(ctx, outputFile, tocFile, builderFlags)
if ctx.Windows() && p.properties.Windows_import_lib != nil {
// Consumers of this library actually links to the import library in build
// time and dynamically links to the DLL in run time. i.e.
// a.exe <-- static link --> foo.lib <-- dynamic link --> foo.dll
importLibSrc := android.PathForModuleSrc(ctx, String(p.properties.Windows_import_lib))
importLibName := p.libraryDecorator.getLibName(ctx) + ".lib"
importLibOutputFile := android.PathForModuleOut(ctx, importLibName)
implicits = append(implicits, importLibOutputFile)
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt import library",
Input: importLibSrc,
Output: importLibOutputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
}
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt shared library",
Implicits: implicits,
Input: in,
Output: outputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
ctx.SetProvider(SharedLibraryInfoProvider, SharedLibraryInfo{
SharedLibrary: outputFile,
UnstrippedSharedLibrary: p.unstrippedOutputFile,
Target: ctx.Target(),
TableOfContents: p.tocFile,
})
return outputFile
}
}
if p.header() {
ctx.SetProvider(HeaderLibraryInfoProvider, HeaderLibraryInfo{})
return nil
}
return nil
}
func (p *prebuiltLibraryLinker) prebuiltSrcs(ctx android.BaseModuleContext) []string {
sanitize := ctx.Module().(*Module).sanitize
srcs := p.properties.Srcs
srcs = append(srcs, srcsForSanitizer(sanitize, p.properties.Sanitized)...)
if p.static() {
srcs = append(srcs, p.libraryDecorator.StaticProperties.Static.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.StaticProperties.Static.Sanitized)...)
}
if p.shared() {
srcs = append(srcs, p.libraryDecorator.SharedProperties.Shared.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.SharedProperties.Shared.Sanitized)...)
}
return srcs
}
func (p *prebuiltLibraryLinker) shared() bool {
return p.libraryDecorator.shared()
}
func (p *prebuiltLibraryLinker) nativeCoverage() bool {
return false
}
func (p *prebuiltLibraryLinker) disablePrebuilt() {
p.properties.Srcs = nil
}
// Implements versionedInterface
func (p *prebuiltLibraryLinker) implementationModuleName(name string) string {
return strings.TrimPrefix(name, "prebuilt_")
}
func NewPrebuiltLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewLibrary(hod)
module.compiler = nil
prebuilt := &prebuiltLibraryLinker{
libraryDecorator: library,
}
module.linker = prebuilt
module.library = prebuilt
module.AddProperties(&prebuilt.properties)
srcsSupplier := func(ctx android.BaseModuleContext, _ android.Module) []string {
return prebuilt.prebuiltSrcs(ctx)
}
android.InitPrebuiltModuleWithSrcSupplier(module, srcsSupplier, "srcs")
// Prebuilt libraries can be used in SDKs.
android.InitSdkAwareModule(module)
return module, library
}
// cc_prebuilt_library installs a precompiled shared library that are
// listed in the srcs property in the device's directory.
func PrebuiltLibraryFactory() android.Module {
module, _ := NewPrebuiltLibrary(android.HostAndDeviceSupported)
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module.Init()
}
// cc_prebuilt_library_shared installs a precompiled shared library that are
// listed in the srcs property in the device's directory.
func PrebuiltSharedLibraryFactory() android.Module {
module, _ := NewPrebuiltSharedLibrary(android.HostAndDeviceSupported)
return module.Init()
}
// cc_prebuilt_test_library_shared installs a precompiled shared library
// to be used as a data dependency of a test-related module (such as cc_test, or
// cc_test_library).
func PrebuiltSharedTestLibraryFactory() android.Module {
module, library := NewPrebuiltLibrary(android.HostAndDeviceSupported)
library.BuildOnlyShared()
library.baseInstaller = NewTestInstaller()
return module.Init()
}
func NewPrebuiltSharedLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyShared()
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module, library
}
// cc_prebuilt_library_static installs a precompiled static library that are
// listed in the srcs property in the device's directory.
func PrebuiltStaticLibraryFactory() android.Module {
module, _ := NewPrebuiltStaticLibrary(android.HostAndDeviceSupported)
return module.Init()
}
func NewPrebuiltStaticLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyStatic()
module.bazelHandler = &prebuiltStaticLibraryBazelHandler{module: module, library: library}
return module, library
}
type prebuiltObjectProperties struct {
Srcs []string `android:"path,arch_variant"`
}
type prebuiltObjectLinker struct {
android.Prebuilt
objectLinker
properties prebuiltObjectProperties
}
type prebuiltStaticLibraryBazelHandler struct {
bazelHandler
module *Module
library *libraryDecorator
}
func (h *prebuiltStaticLibraryBazelHandler) generateBazelBuildActions(ctx android.ModuleContext, label string) bool {
bazelCtx := ctx.Config().BazelContext
ccInfo, ok, err := bazelCtx.GetCcInfo(label, ctx.Arch().ArchType)
if err != nil {
ctx.ModuleErrorf("Error getting Bazel CcInfo: %s", err)
}
if !ok {
return false
}
staticLibs := ccInfo.CcStaticLibraryFiles
if len(staticLibs) > 1 {
ctx.ModuleErrorf("expected 1 static library from bazel target %q, got %s", label, staticLibs)
return false
}
// TODO(b/184543518): cc_prebuilt_library_static may have properties for re-exporting flags
// TODO(eakammer):Add stub-related flags if this library is a stub library.
// h.library.exportVersioningMacroIfNeeded(ctx)
// Dependencies on this library will expect collectedSnapshotHeaders to be set, otherwise
// validation will fail. For now, set this to an empty list.
// TODO(cparsons): More closely mirror the collectHeadersForSnapshot implementation.
h.library.collectedSnapshotHeaders = android.Paths{}
if len(staticLibs) == 0 {
h.module.outputFile = android.OptionalPath{}
return true
}
out := android.PathForBazelOut(ctx, staticLibs[0])
h.module.outputFile = android.OptionalPathForPath(out)
depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(out).Build()
ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
StaticLibrary: out,
TransitiveStaticLibrariesForOrdering: depSet,
})
return true
}
func (p *prebuiltObjectLinker) prebuilt() *android.Prebuilt {
return &p.Prebuilt
}
var _ prebuiltLinkerInterface = (*prebuiltObjectLinker)(nil)
func (p *prebuiltObjectLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
if len(p.properties.Srcs) > 0 {
return p.Prebuilt.SingleSourcePath(ctx)
}
return nil
}
func (p *prebuiltObjectLinker) object() bool {
return true
}
func newPrebuiltObject() *Module {
module := newObject()
prebuilt := &prebuiltObjectLinker{
objectLinker: objectLinker{
baseLinker: NewBaseLinker(nil),
},
}
module.linker = prebuilt
module.AddProperties(&prebuilt.properties)
android.InitPrebuiltModule(module, &prebuilt.properties.Srcs)
android.InitSdkAwareModule(module)
return module
}
func prebuiltObjectFactory() android.Module {
module := newPrebuiltObject()
return module.Init()
}
type prebuiltBinaryLinker struct {
*binaryDecorator
prebuiltLinker
toolPath android.OptionalPath
}
var _ prebuiltLinkerInterface = (*prebuiltBinaryLinker)(nil)
func (p *prebuiltBinaryLinker) hostToolPath() android.OptionalPath {
return p.toolPath
}
func (p *prebuiltBinaryLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
// TODO(ccross): verify shared library dependencies
if len(p.properties.Srcs) > 0 {
fileName := p.getStem(ctx) + flags.Toolchain.ExecutableSuffix()
in := p.Prebuilt.SingleSourcePath(ctx)
outputFile := android.PathForModuleOut(ctx, fileName)
p.unstrippedOutputFile = in
if ctx.Host() {
// Host binaries are symlinked to their prebuilt source locations. That
// way they are executed directly from there so the linker resolves their
// shared library dependencies relative to that location (using
// $ORIGIN/../lib(64):$ORIGIN/lib(64) as RUNPATH). This way the prebuilt
// repository can supply the expected versions of the shared libraries
// without interference from what is in the out tree.
// These shared lib paths may point to copies of the libs in
// .intermediates, which isn't where the binary will load them from, but
// it's fine for dependency tracking. If a library dependency is updated,
// the symlink will get a new timestamp, along with any installed symlinks
// handled in make.
sharedLibPaths := deps.EarlySharedLibs
sharedLibPaths = append(sharedLibPaths, deps.SharedLibs...)
sharedLibPaths = append(sharedLibPaths, deps.LateSharedLibs...)
var fromPath = in.String()
if !filepath.IsAbs(fromPath) {
fromPath = "$$PWD/" + fromPath
}
ctx.Build(pctx, android.BuildParams{
Rule: android.Symlink,
Output: outputFile,
Input: in,
Implicits: sharedLibPaths,
Args: map[string]string{
"fromPath": fromPath,
},
})
p.toolPath = android.OptionalPathForPath(outputFile)
} else {
if p.stripper.NeedsStrip(ctx) {
stripped := android.PathForModuleOut(ctx, "stripped", fileName)
p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, flagsToStripFlags(flags))
in = stripped
}
// Copy binaries to a name matching the final installed name
ctx.Build(pctx, android.BuildParams{
Rule: android.CpExecutable,
Description: "prebuilt",
Output: outputFile,
Input: in,
})
}
return outputFile
}
return nil
}
func (p *prebuiltBinaryLinker) binary() bool {
return true
}
// cc_prebuilt_binary installs a precompiled executable in srcs property in the
// device's directory.
func prebuiltBinaryFactory() android.Module {
module, _ := NewPrebuiltBinary(android.HostAndDeviceSupported)
return module.Init()
}
func NewPrebuiltBinary(hod android.HostOrDeviceSupported) (*Module, *binaryDecorator) {
module, binary := NewBinary(hod)
module.compiler = nil
prebuilt := &prebuiltBinaryLinker{
binaryDecorator: binary,
}
module.linker = prebuilt
module.installer = prebuilt
module.AddProperties(&prebuilt.properties)
android.InitPrebuiltModule(module, &prebuilt.properties.Srcs)
return module, binary
}
type Sanitized struct {
None struct {
Srcs []string `android:"path,arch_variant"`
} `android:"arch_variant"`
Address struct {
Srcs []string `android:"path,arch_variant"`
} `android:"arch_variant"`
Hwaddress struct {
Srcs []string `android:"path,arch_variant"`
} `android:"arch_variant"`
}
func srcsForSanitizer(sanitize *sanitize, sanitized Sanitized) []string {
if sanitize == nil {
return nil
}
if Bool(sanitize.Properties.Sanitize.Address) && sanitized.Address.Srcs != nil {
return sanitized.Address.Srcs
}
if Bool(sanitize.Properties.Sanitize.Hwaddress) && sanitized.Hwaddress.Srcs != nil {
return sanitized.Hwaddress.Srcs
}
return sanitized.None.Srcs
} |
p.libraryDecorator.flagExporter.exportIncludes(ctx)
p.libraryDecorator.flagExporter.reexportDirs(deps.ReexportedDirs...)
p.libraryDecorator.flagExporter.reexportSystemDirs(deps.ReexportedSystemDirs...)
p.libraryDecorator.flagExporter.reexportFlags(deps.ReexportedFlags...) | random_line_split |
prebuilt.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cc
import (
"android/soong/android"
"path/filepath"
"strings"
)
// init registers the cc_prebuilt_* module types at package load time.
func init() {
    RegisterPrebuiltBuildComponents(android.InitRegistrationContext)
}
// RegisterPrebuiltBuildComponents registers every cc_prebuilt_* module type
// with the given registration context.
func RegisterPrebuiltBuildComponents(ctx android.RegistrationContext) {
    moduleTypes := []struct {
        name    string
        factory func() android.Module
    }{
        {"cc_prebuilt_library", PrebuiltLibraryFactory},
        {"cc_prebuilt_library_shared", PrebuiltSharedLibraryFactory},
        {"cc_prebuilt_library_static", PrebuiltStaticLibraryFactory},
        {"cc_prebuilt_test_library_shared", PrebuiltSharedTestLibraryFactory},
        {"cc_prebuilt_object", prebuiltObjectFactory},
        {"cc_prebuilt_binary", prebuiltBinaryFactory},
    }
    for _, mt := range moduleTypes {
        ctx.RegisterModuleType(mt.name, mt.factory)
    }
}
// prebuiltLinkerInterface is implemented by all prebuilt linker decorators in
// this file; it exposes the module-name mapping and the embedded
// android.Prebuilt.
type prebuiltLinkerInterface interface {
    Name(string) string
    prebuilt() *android.Prebuilt
}
// prebuiltLinkerProperties are the user-settable properties shared by the
// prebuilt library and binary module types.
type prebuiltLinkerProperties struct {
    // Sources for a prebuilt library or binary. Can reference a genrule module
    // that generates an executable file.
    Srcs []string `android:"path,arch_variant"`
    // Per-sanitizer source overrides; see the Sanitized type.
    Sanitized Sanitized `android:"arch_variant"`
    // Check the prebuilt ELF files (e.g. DT_SONAME, DT_NEEDED, resolution of undefined
    // symbols, etc), default true.
    Check_elf_files *bool
    // Optionally provide an import library if this is a Windows PE DLL prebuilt.
    // This is needed only if this library is linked by other modules in build time.
    // Only makes sense for the Windows target.
    Windows_import_lib *string `android:"path,arch_variant"`
}
// prebuiltLinker bundles android.Prebuilt with the shared prebuilt properties;
// it is embedded by the concrete library/binary prebuilt linkers.
type prebuiltLinker struct {
    android.Prebuilt
    properties prebuiltLinkerProperties
}

// prebuilt returns the embedded android.Prebuilt.
func (p *prebuiltLinker) prebuilt() *android.Prebuilt {
    return &p.Prebuilt
}

// PrebuiltSrcs returns the configured srcs property.
func (p *prebuiltLinker) PrebuiltSrcs() []string {
    return p.properties.Srcs
}
// prebuiltLibraryInterface is the contract for prebuilt library decorators: a
// library that is also a prebuilt and whose prebuilt sources can be disabled
// (so the from-source module is used instead).
type prebuiltLibraryInterface interface {
    libraryInterface
    prebuiltLinkerInterface
    disablePrebuilt()
}

// prebuiltLibraryLinker decorates a libraryDecorator with prebuilt handling.
type prebuiltLibraryLinker struct {
    *libraryDecorator
    prebuiltLinker
}

// Compile-time interface conformance checks.
var _ prebuiltLinkerInterface = (*prebuiltLibraryLinker)(nil)
var _ prebuiltLibraryInterface = (*prebuiltLibraryLinker)(nil)
func (p *prebuiltLibraryLinker) linkerInit(ctx BaseModuleContext) |
// linkerDeps delegates dependency computation to the underlying library
// decorator; the prebuilt adds no dependencies of its own.
func (p *prebuiltLibraryLinker) linkerDeps(ctx DepsContext, deps Deps) Deps {
    return p.libraryDecorator.linkerDeps(ctx, deps)
}

// linkerFlags returns flags unchanged; no compile/link step runs for prebuilts.
func (p *prebuiltLibraryLinker) linkerFlags(ctx ModuleContext, flags Flags) Flags {
    return flags
}

// linkerProps exposes the library decorator's property structs. The prebuilt
// properties are registered separately via AddProperties in NewPrebuiltLibrary.
func (p *prebuiltLibraryLinker) linkerProps() []interface{} {
    return p.libraryDecorator.linkerProps()
}
// link implements the linker stage for a prebuilt library. Nothing is
// compiled or linked; instead the exported flags/headers are republished and
// the single prebuilt artifact becomes the module output:
//   - static: the source file is used in place;
//   - shared: optionally stripped, copied into the module out dir, with a
//     table-of-contents file generated to avoid unnecessary relinking, and
//     (on Windows) an optional import library copied alongside;
//   - header-only: only the provider is set.
// Returns nil for header-only modules, when no prebuilt source is configured,
// or on a srcs property error.
func (p *prebuiltLibraryLinker) link(ctx ModuleContext,
    flags Flags, deps PathDeps, objs Objects) android.Path {
    // Republish the exported interface exactly as a from-source library would.
    p.libraryDecorator.flagExporter.exportIncludes(ctx)
    p.libraryDecorator.flagExporter.reexportDirs(deps.ReexportedDirs...)
    p.libraryDecorator.flagExporter.reexportSystemDirs(deps.ReexportedSystemDirs...)
    p.libraryDecorator.flagExporter.reexportFlags(deps.ReexportedFlags...)
    p.libraryDecorator.flagExporter.reexportDeps(deps.ReexportedDeps...)
    p.libraryDecorator.flagExporter.addExportedGeneratedHeaders(deps.ReexportedGeneratedHeaders...)
    p.libraryDecorator.flagExporter.setProvider(ctx)
    // TODO(ccross): verify shared library dependencies
    srcs := p.prebuiltSrcs(ctx)
    if len(srcs) > 0 {
        builderFlags := flagsToBuilderFlags(flags)
        // Exactly one prebuilt artifact may be supplied per variant.
        if len(srcs) > 1 {
            ctx.PropertyErrorf("srcs", "multiple prebuilt source files")
            return nil
        }
        p.libraryDecorator.exportVersioningMacroIfNeeded(ctx)
        in := android.PathForModuleSrc(ctx, srcs[0])
        if p.static() {
            // Static prebuilts are consumed in place; just publish the provider.
            depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(in).Build()
            ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
                StaticLibrary: in,
                TransitiveStaticLibrariesForOrdering: depSet,
            })
            return in
        }
        if p.shared() {
            p.unstrippedOutputFile = in
            libName := p.libraryDecorator.getLibName(ctx) + flags.Toolchain.ShlibSuffix()
            outputFile := android.PathForModuleOut(ctx, libName)
            var implicits android.Paths
            if p.stripper.NeedsStrip(ctx) {
                stripFlags := flagsToStripFlags(flags)
                stripped := android.PathForModuleOut(ctx, "stripped", libName)
                p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, stripFlags)
                in = stripped
            }
            // Optimize out relinking against shared libraries whose interface hasn't changed by
            // depending on a table of contents file instead of the library itself.
            tocFile := android.PathForModuleOut(ctx, libName+".toc")
            p.tocFile = android.OptionalPathForPath(tocFile)
            transformSharedObjectToToc(ctx, outputFile, tocFile, builderFlags)
            if ctx.Windows() && p.properties.Windows_import_lib != nil {
                // Consumers of this library actually links to the import library in build
                // time and dynamically links to the DLL in run time. i.e.
                // a.exe <-- static link --> foo.lib <-- dynamic link --> foo.dll
                importLibSrc := android.PathForModuleSrc(ctx, String(p.properties.Windows_import_lib))
                importLibName := p.libraryDecorator.getLibName(ctx) + ".lib"
                importLibOutputFile := android.PathForModuleOut(ctx, importLibName)
                implicits = append(implicits, importLibOutputFile)
                ctx.Build(pctx, android.BuildParams{
                    Rule:        android.Cp,
                    Description: "prebuilt import library",
                    Input:       importLibSrc,
                    Output:      importLibOutputFile,
                    Args: map[string]string{
                        "cpFlags": "-L",
                    },
                })
            }
            ctx.Build(pctx, android.BuildParams{
                Rule:        android.Cp,
                Description: "prebuilt shared library",
                Implicits:   implicits,
                Input:       in,
                Output:      outputFile,
                Args: map[string]string{
                    "cpFlags": "-L",
                },
            })
            ctx.SetProvider(SharedLibraryInfoProvider, SharedLibraryInfo{
                SharedLibrary:           outputFile,
                UnstrippedSharedLibrary: p.unstrippedOutputFile,
                Target:                  ctx.Target(),
                TableOfContents:         p.tocFile,
            })
            return outputFile
        }
    }
    if p.header() {
        // Header-only prebuilt: no artifact, but dependents still need the provider.
        ctx.SetProvider(HeaderLibraryInfoProvider, HeaderLibraryInfo{})
        return nil
    }
    return nil
}
// prebuiltSrcs collects every source property that can supply the prebuilt
// artifact: the common srcs, their sanitizer-specific variants, and the
// static- or shared-specific srcs for the current variant.
func (p *prebuiltLibraryLinker) prebuiltSrcs(ctx android.BaseModuleContext) []string {
    sanitizeProps := ctx.Module().(*Module).sanitize
    result := append([]string(nil), p.properties.Srcs...)
    result = append(result, srcsForSanitizer(sanitizeProps, p.properties.Sanitized)...)
    if p.static() {
        staticProps := p.libraryDecorator.StaticProperties.Static
        result = append(result, staticProps.Srcs...)
        result = append(result, srcsForSanitizer(sanitizeProps, staticProps.Sanitized)...)
    }
    if p.shared() {
        sharedProps := p.libraryDecorator.SharedProperties.Shared
        result = append(result, sharedProps.Srcs...)
        result = append(result, srcsForSanitizer(sanitizeProps, sharedProps.Sanitized)...)
    }
    return result
}
// shared reports whether this prebuilt builds the shared-library variant.
func (p *prebuiltLibraryLinker) shared() bool {
    return p.libraryDecorator.shared()
}

// nativeCoverage is always false: prebuilts carry no coverage instrumentation
// of their own.
func (p *prebuiltLibraryLinker) nativeCoverage() bool {
    return false
}

// disablePrebuilt clears srcs so this prebuilt supplies no artifact.
func (p *prebuiltLibraryLinker) disablePrebuilt() {
    p.properties.Srcs = nil
}
// implementationModuleName implements versionedInterface: it maps this
// prebuilt module's name back to the name of the source module it shadows by
// dropping the "prebuilt_" prefix when present.
func (p *prebuiltLibraryLinker) implementationModuleName(name string) string {
    const prefix = "prebuilt_"
    if strings.HasPrefix(name, prefix) {
        return name[len(prefix):]
    }
    return name
}
// NewPrebuiltLibrary returns a Module and libraryDecorator configured to
// install a precompiled library rather than build one from sources.
func NewPrebuiltLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
    module, library := NewLibrary(hod)
    module.compiler = nil // prebuilts have nothing to compile

    linker := &prebuiltLibraryLinker{libraryDecorator: library}
    module.linker = linker
    module.library = linker
    module.AddProperties(&linker.properties)

    // Sources come from the prebuilt's own property set (including the
    // sanitizer- and variant-specific lists).
    android.InitPrebuiltModuleWithSrcSupplier(module,
        func(ctx android.BaseModuleContext, _ android.Module) []string {
            return linker.prebuiltSrcs(ctx)
        }, "srcs")

    // Prebuilt libraries can be used in SDKs.
    android.InitSdkAwareModule(module)
    return module, library
}
// cc_prebuilt_library installs a precompiled library listed in the srcs
// property into the device's directory.
func PrebuiltLibraryFactory() android.Module {
    module, _ := NewPrebuiltLibrary(android.HostAndDeviceSupported)
    // Prebuilt shared libraries can be included in APEXes
    android.InitApexModule(module)
    return module.Init()
}
// cc_prebuilt_library_shared installs a precompiled shared library listed in
// the srcs property into the device's directory.
func PrebuiltSharedLibraryFactory() android.Module {
    module, _ := NewPrebuiltSharedLibrary(android.HostAndDeviceSupported)
    return module.Init()
}
// cc_prebuilt_test_library_shared installs a precompiled shared library
// to be used as a data dependency of a test-related module (such as cc_test,
// or cc_test_library).
func PrebuiltSharedTestLibraryFactory() android.Module {
    module, library := NewPrebuiltLibrary(android.HostAndDeviceSupported)
    library.BuildOnlyShared()
    // Use the test installer so the library is installed with test artifacts.
    library.baseInstaller = NewTestInstaller()
    return module.Init()
}
// NewPrebuiltSharedLibrary returns a prebuilt library restricted to its
// shared variant.
func NewPrebuiltSharedLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
    module, library := NewPrebuiltLibrary(hod)
    library.BuildOnlyShared()
    // Prebuilt shared libraries can be included in APEXes
    android.InitApexModule(module)
    return module, library
}
// cc_prebuilt_library_static installs a precompiled static library listed in
// the srcs property into the device's directory.
func PrebuiltStaticLibraryFactory() android.Module {
    module, _ := NewPrebuiltStaticLibrary(android.HostAndDeviceSupported)
    return module.Init()
}
// NewPrebuiltStaticLibrary returns a prebuilt library restricted to its
// static variant, wired up with a Bazel handler for mixed builds.
func NewPrebuiltStaticLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
    module, library := NewPrebuiltLibrary(hod)
    library.BuildOnlyStatic()
    module.bazelHandler = &prebuiltStaticLibraryBazelHandler{module: module, library: library}
    return module, library
}
// prebuiltObjectProperties are the properties of cc_prebuilt_object.
type prebuiltObjectProperties struct {
    // Prebuilt object file source(s); exactly one is consumed (see link).
    Srcs []string `android:"path,arch_variant"`
}

// prebuiltObjectLinker decorates objectLinker with prebuilt handling.
type prebuiltObjectLinker struct {
    android.Prebuilt
    objectLinker
    properties prebuiltObjectProperties
}
// prebuiltStaticLibraryBazelHandler bridges a cc_prebuilt_library_static
// module to its Bazel-built counterpart in mixed builds.
type prebuiltStaticLibraryBazelHandler struct {
    bazelHandler
    module  *Module
    library *libraryDecorator
}
// generateBazelBuildActions wires this module's outputs up to the static
// library produced by its Bazel target. It returns true when Bazel handled
// the module (even if the target produced no library), and false when Soong
// should fall back to its own build actions.
func (h *prebuiltStaticLibraryBazelHandler) generateBazelBuildActions(ctx android.ModuleContext, label string) bool {
    bazelCtx := ctx.Config().BazelContext
    ccInfo, ok, err := bazelCtx.GetCcInfo(label, ctx.Arch().ArchType)
    if err != nil {
        ctx.ModuleErrorf("Error getting Bazel CcInfo: %s", err)
        // Bug fix: previously execution fell through after reporting the
        // error and could go on to use an invalid ccInfo below.
        return false
    }
    if !ok {
        return false
    }
    staticLibs := ccInfo.CcStaticLibraryFiles
    if len(staticLibs) > 1 {
        ctx.ModuleErrorf("expected 1 static library from bazel target %q, got %s", label, staticLibs)
        return false
    }
    // TODO(b/184543518): cc_prebuilt_library_static may have properties for re-exporting flags
    // TODO(eakammer):Add stub-related flags if this library is a stub library.
    // h.library.exportVersioningMacroIfNeeded(ctx)
    // Dependencies on this library will expect collectedSnapshotHeaders to be set, otherwise
    // validation will fail. For now, set this to an empty list.
    // TODO(cparsons): More closely mirror the collectHeadersForSnapshot implementation.
    h.library.collectedSnapshotHeaders = android.Paths{}
    if len(staticLibs) == 0 {
        // The Bazel target produced nothing; mark the output as absent but
        // still report the module as handled.
        h.module.outputFile = android.OptionalPath{}
        return true
    }
    out := android.PathForBazelOut(ctx, staticLibs[0])
    h.module.outputFile = android.OptionalPathForPath(out)
    depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(out).Build()
    ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
        StaticLibrary: out,
        TransitiveStaticLibrariesForOrdering: depSet,
    })
    return true
}
// prebuilt returns the embedded android.Prebuilt.
func (p *prebuiltObjectLinker) prebuilt() *android.Prebuilt {
    return &p.Prebuilt
}

// Compile-time interface conformance check.
var _ prebuiltLinkerInterface = (*prebuiltObjectLinker)(nil)
// link returns the single prebuilt object file, or nil when no srcs are
// configured. No build actions are needed for a prebuilt object.
func (p *prebuiltObjectLinker) link(ctx ModuleContext,
    flags Flags, deps PathDeps, objs Objects) android.Path {
    if len(p.properties.Srcs) == 0 {
        return nil
    }
    return p.Prebuilt.SingleSourcePath(ctx)
}
// object reports that this linker decorates an object module.
func (p *prebuiltObjectLinker) object() bool {
    return true
}
// newPrebuiltObject constructs a Module configured as a prebuilt object:
// the object linker is replaced with a prebuilt-aware decorator and the
// module is registered as prebuilt- and SDK-aware.
func newPrebuiltObject() *Module {
    module := newObject()
    linker := &prebuiltObjectLinker{
        objectLinker: objectLinker{baseLinker: NewBaseLinker(nil)},
    }
    module.linker = linker
    module.AddProperties(&linker.properties)
    android.InitPrebuiltModule(module, &linker.properties.Srcs)
    android.InitSdkAwareModule(module)
    return module
}
// prebuiltObjectFactory is the module factory for cc_prebuilt_object.
func prebuiltObjectFactory() android.Module {
    return newPrebuiltObject().Init()
}
// prebuiltBinaryLinker decorates binaryDecorator with prebuilt handling.
type prebuiltBinaryLinker struct {
    *binaryDecorator
    prebuiltLinker
    // toolPath is set for host binaries (see link), pointing at the symlink
    // to the prebuilt source.
    toolPath android.OptionalPath
}

// Compile-time interface conformance check.
var _ prebuiltLinkerInterface = (*prebuiltBinaryLinker)(nil)

// hostToolPath returns the host binary's symlinked output; unset for device
// binaries.
func (p *prebuiltBinaryLinker) hostToolPath() android.OptionalPath {
    return p.toolPath
}
// link produces the output for a prebuilt executable. Host binaries are
// symlinked back to their prebuilt source location; device binaries are
// (optionally stripped and) copied to the final installed name. Returns nil
// when no srcs are configured.
func (p *prebuiltBinaryLinker) link(ctx ModuleContext,
    flags Flags, deps PathDeps, objs Objects) android.Path {
    // TODO(ccross): verify shared library dependencies
    if len(p.properties.Srcs) > 0 {
        fileName := p.getStem(ctx) + flags.Toolchain.ExecutableSuffix()
        in := p.Prebuilt.SingleSourcePath(ctx)
        outputFile := android.PathForModuleOut(ctx, fileName)
        p.unstrippedOutputFile = in
        if ctx.Host() {
            // Host binaries are symlinked to their prebuilt source locations. That
            // way they are executed directly from there so the linker resolves their
            // shared library dependencies relative to that location (using
            // $ORIGIN/../lib(64):$ORIGIN/lib(64) as RUNPATH). This way the prebuilt
            // repository can supply the expected versions of the shared libraries
            // without interference from what is in the out tree.
            // These shared lib paths may point to copies of the libs in
            // .intermediates, which isn't where the binary will load them from, but
            // it's fine for dependency tracking. If a library dependency is updated,
            // the symlink will get a new timestamp, along with any installed symlinks
            // handled in make.
            sharedLibPaths := deps.EarlySharedLibs
            sharedLibPaths = append(sharedLibPaths, deps.SharedLibs...)
            sharedLibPaths = append(sharedLibPaths, deps.LateSharedLibs...)
            var fromPath = in.String()
            if !filepath.IsAbs(fromPath) {
                // Make the symlink target relative to the build's working dir;
                // $$PWD expands at ninja time.
                fromPath = "$$PWD/" + fromPath
            }
            ctx.Build(pctx, android.BuildParams{
                Rule:      android.Symlink,
                Output:    outputFile,
                Input:     in,
                Implicits: sharedLibPaths,
                Args: map[string]string{
                    "fromPath": fromPath,
                },
            })
            p.toolPath = android.OptionalPathForPath(outputFile)
        } else {
            if p.stripper.NeedsStrip(ctx) {
                stripped := android.PathForModuleOut(ctx, "stripped", fileName)
                p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, flagsToStripFlags(flags))
                in = stripped
            }
            // Copy binaries to a name matching the final installed name
            ctx.Build(pctx, android.BuildParams{
                Rule:        android.CpExecutable,
                Description: "prebuilt",
                Output:      outputFile,
                Input:       in,
            })
        }
        return outputFile
    }
    return nil
}
// binary reports that this linker decorates a binary (executable) module.
func (p *prebuiltBinaryLinker) binary() bool {
    return true
}
// cc_prebuilt_binary installs the precompiled executable listed in the srcs
// property into the device's directory.
func prebuiltBinaryFactory() android.Module {
    module, _ := NewPrebuiltBinary(android.HostAndDeviceSupported)
    return module.Init()
}
// NewPrebuiltBinary returns a Module and binaryDecorator configured to
// install a precompiled executable rather than build one from sources.
func NewPrebuiltBinary(hod android.HostOrDeviceSupported) (*Module, *binaryDecorator) {
    module, binary := NewBinary(hod)
    module.compiler = nil // nothing to compile for a prebuilt

    linker := &prebuiltBinaryLinker{binaryDecorator: binary}
    module.linker = linker
    module.installer = linker
    module.AddProperties(&linker.properties)
    android.InitPrebuiltModule(module, &linker.properties.Srcs)
    return module, binary
}
// Sanitized holds per-sanitizer source lists so a prebuilt module can supply
// different artifacts for plain, ASan, and HWASan builds (selected by
// srcsForSanitizer).
type Sanitized struct {
    None struct {
        Srcs []string `android:"path,arch_variant"`
    } `android:"arch_variant"`
    Address struct {
        Srcs []string `android:"path,arch_variant"`
    } `android:"arch_variant"`
    Hwaddress struct {
        Srcs []string `android:"path,arch_variant"`
    } `android:"arch_variant"`
}
func srcsForSanitizer(sanitize *sanitize, sanitized Sanitized) []string {
if sanitize == nil {
return nil
}
if Bool(sanitize.Properties.Sanitize.Address) && sanitized.Address.Srcs != nil {
return sanitized.Address.Srcs
}
if Bool(sanitize.Properties.Sanitize.Hwaddress) && sanitized.Hwaddress.Srcs != nil {
return sanitized.Hwaddress.Srcs
}
return sanitized.None.Srcs
}
| {} | identifier_body |
prebuilt.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cc
import (
"android/soong/android"
"path/filepath"
"strings"
)
func init() {
RegisterPrebuiltBuildComponents(android.InitRegistrationContext)
}
func RegisterPrebuiltBuildComponents(ctx android.RegistrationContext) {
ctx.RegisterModuleType("cc_prebuilt_library", PrebuiltLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_library_shared", PrebuiltSharedLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_library_static", PrebuiltStaticLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_test_library_shared", PrebuiltSharedTestLibraryFactory)
ctx.RegisterModuleType("cc_prebuilt_object", prebuiltObjectFactory)
ctx.RegisterModuleType("cc_prebuilt_binary", prebuiltBinaryFactory)
}
type prebuiltLinkerInterface interface {
Name(string) string
prebuilt() *android.Prebuilt
}
type prebuiltLinkerProperties struct {
// a prebuilt library or binary. Can reference a genrule module that generates an executable file.
Srcs []string `android:"path,arch_variant"`
Sanitized Sanitized `android:"arch_variant"`
// Check the prebuilt ELF files (e.g. DT_SONAME, DT_NEEDED, resolution of undefined
// symbols, etc), default true.
Check_elf_files *bool
// Optionally provide an import library if this is a Windows PE DLL prebuilt.
// This is needed only if this library is linked by other modules in build time.
// Only makes sense for the Windows target.
Windows_import_lib *string `android:"path,arch_variant"`
}
type prebuiltLinker struct {
android.Prebuilt
properties prebuiltLinkerProperties
}
func (p *prebuiltLinker) prebuilt() *android.Prebuilt {
return &p.Prebuilt
}
func (p *prebuiltLinker) PrebuiltSrcs() []string {
return p.properties.Srcs
}
type prebuiltLibraryInterface interface {
libraryInterface
prebuiltLinkerInterface
disablePrebuilt()
}
type prebuiltLibraryLinker struct {
*libraryDecorator
prebuiltLinker
}
var _ prebuiltLinkerInterface = (*prebuiltLibraryLinker)(nil)
var _ prebuiltLibraryInterface = (*prebuiltLibraryLinker)(nil)
func (p *prebuiltLibraryLinker) linkerInit(ctx BaseModuleContext) {}
func (p *prebuiltLibraryLinker) linkerDeps(ctx DepsContext, deps Deps) Deps {
return p.libraryDecorator.linkerDeps(ctx, deps)
}
func (p *prebuiltLibraryLinker) linkerFlags(ctx ModuleContext, flags Flags) Flags {
return flags
}
func (p *prebuiltLibraryLinker) linkerProps() []interface{} {
return p.libraryDecorator.linkerProps()
}
func (p *prebuiltLibraryLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
p.libraryDecorator.flagExporter.exportIncludes(ctx)
p.libraryDecorator.flagExporter.reexportDirs(deps.ReexportedDirs...)
p.libraryDecorator.flagExporter.reexportSystemDirs(deps.ReexportedSystemDirs...)
p.libraryDecorator.flagExporter.reexportFlags(deps.ReexportedFlags...)
p.libraryDecorator.flagExporter.reexportDeps(deps.ReexportedDeps...)
p.libraryDecorator.flagExporter.addExportedGeneratedHeaders(deps.ReexportedGeneratedHeaders...)
p.libraryDecorator.flagExporter.setProvider(ctx)
// TODO(ccross): verify shared library dependencies
srcs := p.prebuiltSrcs(ctx)
if len(srcs) > 0 |
if p.header() {
ctx.SetProvider(HeaderLibraryInfoProvider, HeaderLibraryInfo{})
return nil
}
return nil
}
func (p *prebuiltLibraryLinker) prebuiltSrcs(ctx android.BaseModuleContext) []string {
sanitize := ctx.Module().(*Module).sanitize
srcs := p.properties.Srcs
srcs = append(srcs, srcsForSanitizer(sanitize, p.properties.Sanitized)...)
if p.static() {
srcs = append(srcs, p.libraryDecorator.StaticProperties.Static.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.StaticProperties.Static.Sanitized)...)
}
if p.shared() {
srcs = append(srcs, p.libraryDecorator.SharedProperties.Shared.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.SharedProperties.Shared.Sanitized)...)
}
return srcs
}
func (p *prebuiltLibraryLinker) shared() bool {
return p.libraryDecorator.shared()
}
func (p *prebuiltLibraryLinker) nativeCoverage() bool {
return false
}
func (p *prebuiltLibraryLinker) disablePrebuilt() {
p.properties.Srcs = nil
}
// Implements versionedInterface
func (p *prebuiltLibraryLinker) implementationModuleName(name string) string {
return strings.TrimPrefix(name, "prebuilt_")
}
func NewPrebuiltLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewLibrary(hod)
module.compiler = nil
prebuilt := &prebuiltLibraryLinker{
libraryDecorator: library,
}
module.linker = prebuilt
module.library = prebuilt
module.AddProperties(&prebuilt.properties)
srcsSupplier := func(ctx android.BaseModuleContext, _ android.Module) []string {
return prebuilt.prebuiltSrcs(ctx)
}
android.InitPrebuiltModuleWithSrcSupplier(module, srcsSupplier, "srcs")
// Prebuilt libraries can be used in SDKs.
android.InitSdkAwareModule(module)
return module, library
}
// cc_prebuilt_library installs a precompiled shared library that are
// listed in the srcs property in the device's directory.
func PrebuiltLibraryFactory() android.Module {
module, _ := NewPrebuiltLibrary(android.HostAndDeviceSupported)
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module.Init()
}
// cc_prebuilt_library_shared installs a precompiled shared library that are
// listed in the srcs property in the device's directory.
func PrebuiltSharedLibraryFactory() android.Module {
module, _ := NewPrebuiltSharedLibrary(android.HostAndDeviceSupported)
return module.Init()
}
// cc_prebuilt_test_library_shared installs a precompiled shared library
// to be used as a data dependency of a test-related module (such as cc_test, or
// cc_test_library).
func PrebuiltSharedTestLibraryFactory() android.Module {
module, library := NewPrebuiltLibrary(android.HostAndDeviceSupported)
library.BuildOnlyShared()
library.baseInstaller = NewTestInstaller()
return module.Init()
}
func NewPrebuiltSharedLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyShared()
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module, library
}
// cc_prebuilt_library_static installs a precompiled static library that are
// listed in the srcs property in the device's directory.
func PrebuiltStaticLibraryFactory() android.Module {
module, _ := NewPrebuiltStaticLibrary(android.HostAndDeviceSupported)
return module.Init()
}
func NewPrebuiltStaticLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyStatic()
module.bazelHandler = &prebuiltStaticLibraryBazelHandler{module: module, library: library}
return module, library
}
type prebuiltObjectProperties struct {
Srcs []string `android:"path,arch_variant"`
}
type prebuiltObjectLinker struct {
android.Prebuilt
objectLinker
properties prebuiltObjectProperties
}
type prebuiltStaticLibraryBazelHandler struct {
bazelHandler
module *Module
library *libraryDecorator
}
func (h *prebuiltStaticLibraryBazelHandler) generateBazelBuildActions(ctx android.ModuleContext, label string) bool {
bazelCtx := ctx.Config().BazelContext
ccInfo, ok, err := bazelCtx.GetCcInfo(label, ctx.Arch().ArchType)
if err != nil {
ctx.ModuleErrorf("Error getting Bazel CcInfo: %s", err)
}
if !ok {
return false
}
staticLibs := ccInfo.CcStaticLibraryFiles
if len(staticLibs) > 1 {
ctx.ModuleErrorf("expected 1 static library from bazel target %q, got %s", label, staticLibs)
return false
}
// TODO(b/184543518): cc_prebuilt_library_static may have properties for re-exporting flags
// TODO(eakammer):Add stub-related flags if this library is a stub library.
// h.library.exportVersioningMacroIfNeeded(ctx)
// Dependencies on this library will expect collectedSnapshotHeaders to be set, otherwise
// validation will fail. For now, set this to an empty list.
// TODO(cparsons): More closely mirror the collectHeadersForSnapshot implementation.
h.library.collectedSnapshotHeaders = android.Paths{}
if len(staticLibs) == 0 {
h.module.outputFile = android.OptionalPath{}
return true
}
out := android.PathForBazelOut(ctx, staticLibs[0])
h.module.outputFile = android.OptionalPathForPath(out)
depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(out).Build()
ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
StaticLibrary: out,
TransitiveStaticLibrariesForOrdering: depSet,
})
return true
}
func (p *prebuiltObjectLinker) prebuilt() *android.Prebuilt {
return &p.Prebuilt
}
var _ prebuiltLinkerInterface = (*prebuiltObjectLinker)(nil)
func (p *prebuiltObjectLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
if len(p.properties.Srcs) > 0 {
return p.Prebuilt.SingleSourcePath(ctx)
}
return nil
}
func (p *prebuiltObjectLinker) object() bool {
return true
}
func newPrebuiltObject() *Module {
module := newObject()
prebuilt := &prebuiltObjectLinker{
objectLinker: objectLinker{
baseLinker: NewBaseLinker(nil),
},
}
module.linker = prebuilt
module.AddProperties(&prebuilt.properties)
android.InitPrebuiltModule(module, &prebuilt.properties.Srcs)
android.InitSdkAwareModule(module)
return module
}
func prebuiltObjectFactory() android.Module {
module := newPrebuiltObject()
return module.Init()
}
type prebuiltBinaryLinker struct {
*binaryDecorator
prebuiltLinker
toolPath android.OptionalPath
}
var _ prebuiltLinkerInterface = (*prebuiltBinaryLinker)(nil)
func (p *prebuiltBinaryLinker) hostToolPath() android.OptionalPath {
return p.toolPath
}
func (p *prebuiltBinaryLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
// TODO(ccross): verify shared library dependencies
if len(p.properties.Srcs) > 0 {
fileName := p.getStem(ctx) + flags.Toolchain.ExecutableSuffix()
in := p.Prebuilt.SingleSourcePath(ctx)
outputFile := android.PathForModuleOut(ctx, fileName)
p.unstrippedOutputFile = in
if ctx.Host() {
// Host binaries are symlinked to their prebuilt source locations. That
// way they are executed directly from there so the linker resolves their
// shared library dependencies relative to that location (using
// $ORIGIN/../lib(64):$ORIGIN/lib(64) as RUNPATH). This way the prebuilt
// repository can supply the expected versions of the shared libraries
// without interference from what is in the out tree.
// These shared lib paths may point to copies of the libs in
// .intermediates, which isn't where the binary will load them from, but
// it's fine for dependency tracking. If a library dependency is updated,
// the symlink will get a new timestamp, along with any installed symlinks
// handled in make.
sharedLibPaths := deps.EarlySharedLibs
sharedLibPaths = append(sharedLibPaths, deps.SharedLibs...)
sharedLibPaths = append(sharedLibPaths, deps.LateSharedLibs...)
var fromPath = in.String()
if !filepath.IsAbs(fromPath) {
fromPath = "$$PWD/" + fromPath
}
ctx.Build(pctx, android.BuildParams{
Rule: android.Symlink,
Output: outputFile,
Input: in,
Implicits: sharedLibPaths,
Args: map[string]string{
"fromPath": fromPath,
},
})
p.toolPath = android.OptionalPathForPath(outputFile)
} else {
if p.stripper.NeedsStrip(ctx) {
stripped := android.PathForModuleOut(ctx, "stripped", fileName)
p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, flagsToStripFlags(flags))
in = stripped
}
// Copy binaries to a name matching the final installed name
ctx.Build(pctx, android.BuildParams{
Rule: android.CpExecutable,
Description: "prebuilt",
Output: outputFile,
Input: in,
})
}
return outputFile
}
return nil
}
func (p *prebuiltBinaryLinker) binary() bool {
return true
}
// cc_prebuilt_binary installs a precompiled executable in srcs property in the
// device's directory.
func prebuiltBinaryFactory() android.Module {
module, _ := NewPrebuiltBinary(android.HostAndDeviceSupported)
return module.Init()
}
func NewPrebuiltBinary(hod android.HostOrDeviceSupported) (*Module, *binaryDecorator) {
module, binary := NewBinary(hod)
module.compiler = nil
prebuilt := &prebuiltBinaryLinker{
binaryDecorator: binary,
}
module.linker = prebuilt
module.installer = prebuilt
module.AddProperties(&prebuilt.properties)
android.InitPrebuiltModule(module, &prebuilt.properties.Srcs)
return module, binary
}
type Sanitized struct {
None struct {
Srcs []string `android:"path,arch_variant"`
} `android:"arch_variant"`
Address struct {
Srcs []string `android:"path,arch_variant"`
} `android:"arch_variant"`
Hwaddress struct {
Srcs []string `android:"path,arch_variant"`
} `android:"arch_variant"`
}
func srcsForSanitizer(sanitize *sanitize, sanitized Sanitized) []string {
if sanitize == nil {
return nil
}
if Bool(sanitize.Properties.Sanitize.Address) && sanitized.Address.Srcs != nil {
return sanitized.Address.Srcs
}
if Bool(sanitize.Properties.Sanitize.Hwaddress) && sanitized.Hwaddress.Srcs != nil {
return sanitized.Hwaddress.Srcs
}
return sanitized.None.Srcs
}
| {
builderFlags := flagsToBuilderFlags(flags)
if len(srcs) > 1 {
ctx.PropertyErrorf("srcs", "multiple prebuilt source files")
return nil
}
p.libraryDecorator.exportVersioningMacroIfNeeded(ctx)
in := android.PathForModuleSrc(ctx, srcs[0])
if p.static() {
depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(in).Build()
ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
StaticLibrary: in,
TransitiveStaticLibrariesForOrdering: depSet,
})
return in
}
if p.shared() {
p.unstrippedOutputFile = in
libName := p.libraryDecorator.getLibName(ctx) + flags.Toolchain.ShlibSuffix()
outputFile := android.PathForModuleOut(ctx, libName)
var implicits android.Paths
if p.stripper.NeedsStrip(ctx) {
stripFlags := flagsToStripFlags(flags)
stripped := android.PathForModuleOut(ctx, "stripped", libName)
p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, stripFlags)
in = stripped
}
// Optimize out relinking against shared libraries whose interface hasn't changed by
// depending on a table of contents file instead of the library itself.
tocFile := android.PathForModuleOut(ctx, libName+".toc")
p.tocFile = android.OptionalPathForPath(tocFile)
transformSharedObjectToToc(ctx, outputFile, tocFile, builderFlags)
if ctx.Windows() && p.properties.Windows_import_lib != nil {
// Consumers of this library actually links to the import library in build
// time and dynamically links to the DLL in run time. i.e.
// a.exe <-- static link --> foo.lib <-- dynamic link --> foo.dll
importLibSrc := android.PathForModuleSrc(ctx, String(p.properties.Windows_import_lib))
importLibName := p.libraryDecorator.getLibName(ctx) + ".lib"
importLibOutputFile := android.PathForModuleOut(ctx, importLibName)
implicits = append(implicits, importLibOutputFile)
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt import library",
Input: importLibSrc,
Output: importLibOutputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
}
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt shared library",
Implicits: implicits,
Input: in,
Output: outputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
ctx.SetProvider(SharedLibraryInfoProvider, SharedLibraryInfo{
SharedLibrary: outputFile,
UnstrippedSharedLibrary: p.unstrippedOutputFile,
Target: ctx.Target(),
TableOfContents: p.tocFile,
})
return outputFile
}
} | conditional_block |
handTrack1.py | import cv2
import numpy as np
import pickle
import random
from matplotlib import pyplot as plot
import pyautogui
import math
import time
#from object_tracker import startX, startY, endY, endX
hand_hist = None
traverse_point = []
total_rectangle = 16
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
have_background = False
background = None
defHands = []
newHands = []
lengthColec = []
cx = 200
cy = 200
bx = 200
by = 300
sx = 0
sy = 0
cropped = None
areaHand = 7000
startX = 0
endX = 0
width = 0
height = 0
startY = 0
endY = 0
backDivY = 10
backDivX = 17
backgroundHists = None
oldX = 0
oldY = 0
firstRun = 0
sameCount = 0
resetCount = 100
handFound = False
fWidth = 0
fHeight = 0
frameWidth = 0
frameHeight = 0
def rescale_frame(frame, wpercent=100, hpercent=100):
global fWidth, fHeight
fWidth = int(frame.shape[1] * wpercent / 100)
fHeight = int(frame.shape[0] * hpercent / 100)
##print(str(fWidth))
#print(str(fHeight))
return cv2.resize(frame, (fWidth, fHeight), interpolation=cv2.INTER_AREA)
def contours(frame, hist_mask_image):
#gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255, 0)
'''
hsv = cv2.cvtColor(hist_mask_image,cv2.COLOR_BGR2HSV)
ave = [0, 0, 0]
allPixR = []
allPixB = []
allPixG = []
for i in (0,len(hsv)-1):
for j in (0,len(hsv[0])-1):
ave += hsv[i][j]
allPixR.append(hsv[i][j][0])
allPixB.append(hsv[i][j][1])
allPixG.append(hsv[i][j][2])
ave = ave/(len(allPixR))
stdr = np.std(allPixR)+10
stdb = np.std(allPixB)+10
stdg = np.std(allPixG)+10
lower = np.array([int(ave[0]-stdr), int(ave[1]-stdb), int(ave[2]-stdg)])
upper = np.array([int(ave[0]+stdr), int(ave[1]+stdb), int(ave[2]+stdg)])
thresh = cv2.inRange(hsv, lower, upper)
'''
gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255,0)# cv2.ADAPTIVE_THRESH_MEAN_C)
#colour_mask = cv2.inRange(hsv,)
cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#hull = cv2.convexHull(cont, returnPoints=False)
#defects = cv2.convexityDefects(cont, hull)
#if (len(defects) >= 2):
#cv2.drawContours(frame, cont, -1, (0,255,0), 3)
return cont
def max_contour(frame, contour_list):
global bx, by, sx, sy, handFound
global defHands
global newHands
global lengthColec
global areaHand, cropped
global fWidth, fHeight
global resetCount
neededHands = 0
handFound = False
exMatch = None
max_i = -1
max_area = 0
min = 7000
max = 30000
tempx = 0
tempy = 0
with open("Hands.txt", "rb") as fp:
defHands = pickle.load(fp)
#print("\n\n numHands" +str(len(defHands)) + "\n\n")
#print("\n\n cont list " +str(len(defHands)) + "\n\n")
for i in range(len(contour_list)):
#time.sleep(1)
cnt = contour_list[i]
area_cnt = cv2.contourArea(cnt)
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
moment = cv2.moments(cnt)
if moment['m00'] != 0:
tempx = int(moment['m10'] / moment['m00']) + sx
tempy = int(moment['m01'] / moment['m00']) + sy
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#if (len(cnt)>4):
#print(str(cv2.fitEllipse(cnt)))
#print("Num of sides: " + str(len(approx)))
#lengthColec.append(len(approx))
center = (bx,by)
colourCenter = (10,10,10)
cv2.circle(frame, center, 10,colourCenter,2)
numMatch = 0
for j in ((defHands)):
if (cv2.matchShapes(j, cnt, 1, 0.0) < 0.02) and abs(cv2.contourArea(j)-cv2.contourArea(cnt))<7000:
numMatch+=1
exMatch = j
if numMatch > neededHands:
break
#print("matchAve so far: " + str(matchAve))
if len(defHands) > 0:
pMatch = numMatch/ len(defHands)
else:
pMatch = -1
#print("\n\n match is: " + str(matchAve) +"\n\n")
try:
#if ( len(approx == 3) or (len(approx) >= 11 and len (aprox) < 14) or (len(approx) >= 8 and len (aprox) <= 9) or (len(approx) >= 18 and len (aprox) <= 19)) and len(defects) >=2 and area_cnt > 9000 and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 200**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
#print("Center: " + str(tempx) + ", "+ str(tempy))
#print("Should be : " + str(bx) + ", "+ str(by))
#print("bad")
#cv2.circle(frame, bx,by, 10,[10,10,10]
#print("num matched: " +str(numMatch))
if numMatch >neededHands and area_cnt > 2000 and ((tempx-bx)**2 + (tempy-by)**2 < 150**2) :#and (areaHand == -1 or abs(area_cnt -areaHand) <2000) and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 100**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
handFound = True
#if numMatch > 0 :#and ((tempx-bx)**2 + (tempy-by)**2 < 100**2) and and len(approx)>=5 and len(approx) <=19 and len(defects) >=2
#area_cnt>max_area
#print("\n\n in \n\n")
cv2.drawContours(cropped, cnt, -1, (0,255,0), 3)
#cv2.drawContours(cropped, exMatch, -1, (255,0,255), 3)
newHands.append(cnt)
areaHand = area_cnt
bx = tempx
by = tempy
'''
if bx < int(width/2)+20:
bx = int(width/2)
if bx > fWidth-int(width/2):
bx = fWidth-int(width/2)
if by < int(height/2):
by = int(height/2)
if by > fHeight-int(height/2):
by = fHeight-int(height/2)
'''
#print("Center: " + str(bx) + ", "+ str(by))
max_area = area_cnt
max_i = i
#print("accepted area" + str(area_cnt))
#cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
#peri = cv2.arcLength(cnt, True)
#approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#print("Num of sides: " + str(len(approx)))
lengthColec.append(len(approx))
#maxDef =max(defects[0].depth, defects[1].depth)
#print("convexityDefectDepth: " + str(maxDef))
#defHands.appned(cnt)
else:
handFound = False
return void
if max_i != -1:
return contour_list[max_i]
return None
except:
if max_i != -1:
return contour_list[max_i]
return None
#print("cont area average? lol: " + str(max_area))
return contour_list[max_i]
def draw_rect(frame):
rows, cols, _ = frame.shape
global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
hand_rect_one_x = np.array(
[6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 12 * rows / 30,
12 * rows / 30, 12 * rows / 30, 12 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30], dtype=np.uint32)
hand_rect_one_y = np.array(
[9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30 , 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30,
10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([160, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
def farthest_point(defects, contour, centroid):
global bx, by
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
#cx, cy = centroid
centroid
x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float)
xp = cv2.pow(cv2.subtract(x, bx), 2)
yp = cv2.pow(cv2.subtract(y, by), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def manage_image_opr(backFrame, frame, hand_hist):
global cx, cy, cropped, fWidth, fHeight
hist_mask_image = hist_masking(backFrame, frame, hand_hist)
contour_list = contours(frame, hist_mask_image)
max_cont = max_contour(frame, contour_list)
#cnt_centroid =
centroid(max_cont)
cnt_centroid = cx, cy
#cv2.circle(cropped, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)#cx, cy)#cnt_centroid)
#print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point)) #should be cnt_centroid
#cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
pointX = bx
pointY = by
height, width, __ = frame.shape
lowXBound = width*1/3
highXBound = width*2/3
lowYBound = height*1/3
highYBound = height*2/3
if (bx > lowXBound and bx < highXBound):
if (by> highYBound):
pyautogui.scroll(int((highYBound-by)/1))
elif (by < lowYBound):
pyautogui.scroll(int((lowYBound-by)/1))
elif (by > lowYBound and by < highYBound):
if (bx> highXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((highXBound-bx)/2))#('-')
pyautogui.keyUp('ctrl')
elif (bx < lowXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((lowXBound-bx)/2))#press('+')
pyautogui.keyUp('ctrl')
#coordinate = (str(far_point)).split()
#print (coordinate[0])
#print (coordinate([1]))
'''
print ("point x: ", pointX, "point y: ", pointY)
print(width, "width")
speed = int((fHeight/2 - by)/10)
#print("height", height)
print("scroll speed ", speed)
if (speed < -20 or speed > 20): #region verically for scroll
speed/=5
pyautogui.scroll(speed) #scrolls faster depending on height
else:
speed = int((fWidth/2 - bx)/10)
print("zoom speed ", speed)
if (speed > 20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('+')
pyautogui.keyUp('ctrl')
if (speed < -20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('-')
pyautogui.keyUp('ctrl')
'''
def plotHand():
|
def main():
global hand_hist, resetCount, handFound
#global background
#global have_background
global cx
global cy,bx,by
global defHands, newHands
is_hand_hist_created = False
capture = cv2.VideoCapture(0)
while capture.isOpened():
if resetCount <=0:
bx = int(fWidth/2)
by = int(fHeight/2)
#print("Reset data: " + str(bx) + " " + str(resetCount) + " " + str(by))
pressed_key = cv2.waitKey(1)
_, frame = capture.read()
if is_hand_hist_created==False:
txtColor = [0,255,0]
frame = cv2.flip(frame,1)
cv2.putText((frame), "Place hand over all green rectangles, then press \'z\'", (10,70),cv2.FONT_HERSHEY_SIMPLEX, .7,txtColor, 2)
frame = cv2.flip(frame,1)
if pressed_key & 0xFF == ord('z'):
#getBack(frame)
is_hand_hist_created = True
#frame = cutOutFace(frame)
hand_hist = hand_histogram(frame)
cx = int(frame.shape[0]/2)
cy = int(frame.shape[1]/2)
#if pressed_key & 0xFF == ord('r'):
#have_background = False
#getBack(frame)
if pressed_key & 0xFF == ord('e') and len(defHands)>0:
sum = 0
with open("Hands.txt", "wb") as fp:
all = newHands+ defHands
pickle.dump(all, fp)
sum += len(newHands)
#print(" tot sum " + str(sum))
break
if is_hand_hist_created:
if handFound:
resetCount = 100
else:
resetCount -=1
manage_image_opr(background, frame, hand_hist)
else:
frame = draw_rect(frame)
cv2.imshow("Live Feed", cv2.flip(rescale_frame(frame),1))
if pressed_key == 27:
#plotHand()
break
cv2.destroyAllWindows()
capture.release()
if __name__ == '__main__':
main()
| global lengthColec
data = np.random.normal(0, 21, 100)
bins = np.arange(0, 21, 1)
plot.xlim([min(data)-.5, max(data)+.5])
plot.hist(data, bins=bins, alpha=0.5)
plot.title('metaData plot')
plot.xlabel('side lengths)')
plot.ylabel('Number of occurance')
plot.show() | identifier_body |
handTrack1.py | import cv2
import numpy as np
import pickle
import random
from matplotlib import pyplot as plot
import pyautogui
import math
import time
#from object_tracker import startX, startY, endY, endX
hand_hist = None
traverse_point = []
total_rectangle = 16
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
have_background = False
background = None
defHands = []
newHands = []
lengthColec = []
cx = 200
cy = 200
bx = 200
by = 300
sx = 0
sy = 0
cropped = None
areaHand = 7000
startX = 0
endX = 0
width = 0
height = 0
startY = 0
endY = 0
backDivY = 10
backDivX = 17
backgroundHists = None
oldX = 0
oldY = 0
firstRun = 0
sameCount = 0
resetCount = 100
handFound = False
fWidth = 0
fHeight = 0
frameWidth = 0
frameHeight = 0
def rescale_frame(frame, wpercent=100, hpercent=100):
global fWidth, fHeight
fWidth = int(frame.shape[1] * wpercent / 100)
fHeight = int(frame.shape[0] * hpercent / 100)
##print(str(fWidth))
#print(str(fHeight))
return cv2.resize(frame, (fWidth, fHeight), interpolation=cv2.INTER_AREA)
def contours(frame, hist_mask_image):
#gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255, 0)
'''
hsv = cv2.cvtColor(hist_mask_image,cv2.COLOR_BGR2HSV)
ave = [0, 0, 0]
allPixR = []
allPixB = []
allPixG = []
for i in (0,len(hsv)-1):
for j in (0,len(hsv[0])-1):
ave += hsv[i][j]
allPixR.append(hsv[i][j][0])
allPixB.append(hsv[i][j][1])
allPixG.append(hsv[i][j][2])
ave = ave/(len(allPixR))
stdr = np.std(allPixR)+10
stdb = np.std(allPixB)+10
stdg = np.std(allPixG)+10
lower = np.array([int(ave[0]-stdr), int(ave[1]-stdb), int(ave[2]-stdg)])
upper = np.array([int(ave[0]+stdr), int(ave[1]+stdb), int(ave[2]+stdg)])
thresh = cv2.inRange(hsv, lower, upper)
'''
gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255,0)# cv2.ADAPTIVE_THRESH_MEAN_C)
#colour_mask = cv2.inRange(hsv,)
cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#hull = cv2.convexHull(cont, returnPoints=False)
#defects = cv2.convexityDefects(cont, hull)
#if (len(defects) >= 2):
#cv2.drawContours(frame, cont, -1, (0,255,0), 3)
return cont
def max_contour(frame, contour_list):
global bx, by, sx, sy, handFound
global defHands
global newHands
global lengthColec
global areaHand, cropped
global fWidth, fHeight
global resetCount
neededHands = 0
handFound = False
exMatch = None
max_i = -1
max_area = 0
min = 7000
max = 30000
tempx = 0
tempy = 0
with open("Hands.txt", "rb") as fp:
defHands = pickle.load(fp)
#print("\n\n numHands" +str(len(defHands)) + "\n\n")
#print("\n\n cont list " +str(len(defHands)) + "\n\n")
for i in range(len(contour_list)):
#time.sleep(1)
cnt = contour_list[i]
area_cnt = cv2.contourArea(cnt)
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
moment = cv2.moments(cnt)
if moment['m00'] != 0:
tempx = int(moment['m10'] / moment['m00']) + sx
tempy = int(moment['m01'] / moment['m00']) + sy
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#if (len(cnt)>4):
#print(str(cv2.fitEllipse(cnt)))
#print("Num of sides: " + str(len(approx)))
#lengthColec.append(len(approx))
center = (bx,by)
colourCenter = (10,10,10)
cv2.circle(frame, center, 10,colourCenter,2)
numMatch = 0
for j in ((defHands)):
if (cv2.matchShapes(j, cnt, 1, 0.0) < 0.02) and abs(cv2.contourArea(j)-cv2.contourArea(cnt))<7000:
numMatch+=1
exMatch = j
if numMatch > neededHands:
break
#print("matchAve so far: " + str(matchAve))
if len(defHands) > 0:
pMatch = numMatch/ len(defHands)
else:
pMatch = -1
#print("\n\n match is: " + str(matchAve) +"\n\n")
try:
#if ( len(approx == 3) or (len(approx) >= 11 and len (aprox) < 14) or (len(approx) >= 8 and len (aprox) <= 9) or (len(approx) >= 18 and len (aprox) <= 19)) and len(defects) >=2 and area_cnt > 9000 and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 200**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
#print("Center: " + str(tempx) + ", "+ str(tempy))
#print("Should be : " + str(bx) + ", "+ str(by))
#print("bad")
#cv2.circle(frame, bx,by, 10,[10,10,10]
#print("num matched: " +str(numMatch))
if numMatch >neededHands and area_cnt > 2000 and ((tempx-bx)**2 + (tempy-by)**2 < 150**2) :#and (areaHand == -1 or abs(area_cnt -areaHand) <2000) and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 100**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
handFound = True
#if numMatch > 0 :#and ((tempx-bx)**2 + (tempy-by)**2 < 100**2) and and len(approx)>=5 and len(approx) <=19 and len(defects) >=2
#area_cnt>max_area
#print("\n\n in \n\n")
cv2.drawContours(cropped, cnt, -1, (0,255,0), 3)
#cv2.drawContours(cropped, exMatch, -1, (255,0,255), 3)
newHands.append(cnt)
areaHand = area_cnt
bx = tempx
by = tempy
'''
if bx < int(width/2)+20:
bx = int(width/2)
if bx > fWidth-int(width/2):
bx = fWidth-int(width/2)
if by < int(height/2):
by = int(height/2)
if by > fHeight-int(height/2):
by = fHeight-int(height/2)
'''
#print("Center: " + str(bx) + ", "+ str(by))
max_area = area_cnt
max_i = i
#print("accepted area" + str(area_cnt))
#cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
#peri = cv2.arcLength(cnt, True)
#approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#print("Num of sides: " + str(len(approx)))
lengthColec.append(len(approx))
#maxDef =max(defects[0].depth, defects[1].depth)
#print("convexityDefectDepth: " + str(maxDef))
#defHands.appned(cnt)
else:
handFound = False
return void
if max_i != -1:
return contour_list[max_i]
return None
except:
if max_i != -1:
return contour_list[max_i]
return None
#print("cont area average? lol: " + str(max_area))
return contour_list[max_i]
def draw_rect(frame):
rows, cols, _ = frame.shape
global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
hand_rect_one_x = np.array(
[6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 12 * rows / 30,
12 * rows / 30, 12 * rows / 30, 12 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30], dtype=np.uint32)
hand_rect_one_y = np.array(
[9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30 , 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30,
10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([160, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
def | (defects, contour, centroid):
global bx, by
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
#cx, cy = centroid
centroid
x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float)
xp = cv2.pow(cv2.subtract(x, bx), 2)
yp = cv2.pow(cv2.subtract(y, by), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def manage_image_opr(backFrame, frame, hand_hist):
global cx, cy, cropped, fWidth, fHeight
hist_mask_image = hist_masking(backFrame, frame, hand_hist)
contour_list = contours(frame, hist_mask_image)
max_cont = max_contour(frame, contour_list)
#cnt_centroid =
centroid(max_cont)
cnt_centroid = cx, cy
#cv2.circle(cropped, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)#cx, cy)#cnt_centroid)
#print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point)) #should be cnt_centroid
#cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
pointX = bx
pointY = by
height, width, __ = frame.shape
lowXBound = width*1/3
highXBound = width*2/3
lowYBound = height*1/3
highYBound = height*2/3
if (bx > lowXBound and bx < highXBound):
if (by> highYBound):
pyautogui.scroll(int((highYBound-by)/1))
elif (by < lowYBound):
pyautogui.scroll(int((lowYBound-by)/1))
elif (by > lowYBound and by < highYBound):
if (bx> highXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((highXBound-bx)/2))#('-')
pyautogui.keyUp('ctrl')
elif (bx < lowXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((lowXBound-bx)/2))#press('+')
pyautogui.keyUp('ctrl')
#coordinate = (str(far_point)).split()
#print (coordinate[0])
#print (coordinate([1]))
'''
print ("point x: ", pointX, "point y: ", pointY)
print(width, "width")
speed = int((fHeight/2 - by)/10)
#print("height", height)
print("scroll speed ", speed)
if (speed < -20 or speed > 20): #region verically for scroll
speed/=5
pyautogui.scroll(speed) #scrolls faster depending on height
else:
speed = int((fWidth/2 - bx)/10)
print("zoom speed ", speed)
if (speed > 20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('+')
pyautogui.keyUp('ctrl')
if (speed < -20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('-')
pyautogui.keyUp('ctrl')
'''
def plotHand():
global lengthColec
data = np.random.normal(0, 21, 100)
bins = np.arange(0, 21, 1)
plot.xlim([min(data)-.5, max(data)+.5])
plot.hist(data, bins=bins, alpha=0.5)
plot.title('metaData plot')
plot.xlabel('side lengths)')
plot.ylabel('Number of occurance')
plot.show()
def main():
global hand_hist, resetCount, handFound
#global background
#global have_background
global cx
global cy,bx,by
global defHands, newHands
is_hand_hist_created = False
capture = cv2.VideoCapture(0)
while capture.isOpened():
if resetCount <=0:
bx = int(fWidth/2)
by = int(fHeight/2)
#print("Reset data: " + str(bx) + " " + str(resetCount) + " " + str(by))
pressed_key = cv2.waitKey(1)
_, frame = capture.read()
if is_hand_hist_created==False:
txtColor = [0,255,0]
frame = cv2.flip(frame,1)
cv2.putText((frame), "Place hand over all green rectangles, then press \'z\'", (10,70),cv2.FONT_HERSHEY_SIMPLEX, .7,txtColor, 2)
frame = cv2.flip(frame,1)
if pressed_key & 0xFF == ord('z'):
#getBack(frame)
is_hand_hist_created = True
#frame = cutOutFace(frame)
hand_hist = hand_histogram(frame)
cx = int(frame.shape[0]/2)
cy = int(frame.shape[1]/2)
#if pressed_key & 0xFF == ord('r'):
#have_background = False
#getBack(frame)
if pressed_key & 0xFF == ord('e') and len(defHands)>0:
sum = 0
with open("Hands.txt", "wb") as fp:
all = newHands+ defHands
pickle.dump(all, fp)
sum += len(newHands)
#print(" tot sum " + str(sum))
break
if is_hand_hist_created:
if handFound:
resetCount = 100
else:
resetCount -=1
manage_image_opr(background, frame, hand_hist)
else:
frame = draw_rect(frame)
cv2.imshow("Live Feed", cv2.flip(rescale_frame(frame),1))
if pressed_key == 27:
#plotHand()
break
cv2.destroyAllWindows()
capture.release()
if __name__ == '__main__':
main()
| farthest_point | identifier_name |
handTrack1.py | import cv2
import numpy as np
import pickle
import random
from matplotlib import pyplot as plot
import pyautogui
import math
import time
#from object_tracker import startX, startY, endY, endX
hand_hist = None
traverse_point = []
total_rectangle = 16
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
have_background = False
background = None
defHands = []
newHands = []
lengthColec = []
cx = 200
cy = 200
bx = 200
by = 300
sx = 0
sy = 0
cropped = None
areaHand = 7000
startX = 0
endX = 0
width = 0
height = 0
startY = 0
endY = 0
backDivY = 10
backDivX = 17
backgroundHists = None
oldX = 0
oldY = 0
firstRun = 0
sameCount = 0
resetCount = 100
handFound = False
fWidth = 0
fHeight = 0
frameWidth = 0
frameHeight = 0
def rescale_frame(frame, wpercent=100, hpercent=100):
global fWidth, fHeight
fWidth = int(frame.shape[1] * wpercent / 100)
fHeight = int(frame.shape[0] * hpercent / 100)
##print(str(fWidth))
#print(str(fHeight))
return cv2.resize(frame, (fWidth, fHeight), interpolation=cv2.INTER_AREA)
def contours(frame, hist_mask_image):
#gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255, 0)
'''
hsv = cv2.cvtColor(hist_mask_image,cv2.COLOR_BGR2HSV)
ave = [0, 0, 0]
allPixR = []
allPixB = []
allPixG = []
for i in (0,len(hsv)-1):
for j in (0,len(hsv[0])-1):
ave += hsv[i][j]
allPixR.append(hsv[i][j][0])
allPixB.append(hsv[i][j][1])
allPixG.append(hsv[i][j][2])
ave = ave/(len(allPixR))
stdr = np.std(allPixR)+10
stdb = np.std(allPixB)+10
stdg = np.std(allPixG)+10
lower = np.array([int(ave[0]-stdr), int(ave[1]-stdb), int(ave[2]-stdg)])
upper = np.array([int(ave[0]+stdr), int(ave[1]+stdb), int(ave[2]+stdg)])
thresh = cv2.inRange(hsv, lower, upper)
'''
gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255,0)# cv2.ADAPTIVE_THRESH_MEAN_C)
#colour_mask = cv2.inRange(hsv,)
cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#hull = cv2.convexHull(cont, returnPoints=False)
#defects = cv2.convexityDefects(cont, hull)
#if (len(defects) >= 2):
#cv2.drawContours(frame, cont, -1, (0,255,0), 3)
return cont
def max_contour(frame, contour_list):
global bx, by, sx, sy, handFound
global defHands
global newHands
global lengthColec
global areaHand, cropped
global fWidth, fHeight
global resetCount
neededHands = 0
handFound = False
exMatch = None
max_i = -1
max_area = 0
min = 7000
max = 30000
tempx = 0
tempy = 0
with open("Hands.txt", "rb") as fp:
defHands = pickle.load(fp)
#print("\n\n numHands" +str(len(defHands)) + "\n\n")
#print("\n\n cont list " +str(len(defHands)) + "\n\n")
for i in range(len(contour_list)):
#time.sleep(1)
cnt = contour_list[i]
area_cnt = cv2.contourArea(cnt)
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
moment = cv2.moments(cnt)
if moment['m00'] != 0:
tempx = int(moment['m10'] / moment['m00']) + sx
tempy = int(moment['m01'] / moment['m00']) + sy
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#if (len(cnt)>4):
#print(str(cv2.fitEllipse(cnt)))
#print("Num of sides: " + str(len(approx)))
#lengthColec.append(len(approx))
center = (bx,by)
colourCenter = (10,10,10)
cv2.circle(frame, center, 10,colourCenter,2)
numMatch = 0
for j in ((defHands)):
if (cv2.matchShapes(j, cnt, 1, 0.0) < 0.02) and abs(cv2.contourArea(j)-cv2.contourArea(cnt))<7000:
numMatch+=1
exMatch = j
if numMatch > neededHands:
break
#print("matchAve so far: " + str(matchAve))
if len(defHands) > 0:
pMatch = numMatch/ len(defHands)
else:
pMatch = -1
#print("\n\n match is: " + str(matchAve) +"\n\n")
try:
#if ( len(approx == 3) or (len(approx) >= 11 and len (aprox) < 14) or (len(approx) >= 8 and len (aprox) <= 9) or (len(approx) >= 18 and len (aprox) <= 19)) and len(defects) >=2 and area_cnt > 9000 and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 200**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
#print("Center: " + str(tempx) + ", "+ str(tempy))
#print("Should be : " + str(bx) + ", "+ str(by))
#print("bad")
#cv2.circle(frame, bx,by, 10,[10,10,10]
#print("num matched: " +str(numMatch))
if numMatch >neededHands and area_cnt > 2000 and ((tempx-bx)**2 + (tempy-by)**2 < 150**2) :#and (areaHand == -1 or abs(area_cnt -areaHand) <2000) and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 100**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
handFound = True
#if numMatch > 0 :#and ((tempx-bx)**2 + (tempy-by)**2 < 100**2) and and len(approx)>=5 and len(approx) <=19 and len(defects) >=2
#area_cnt>max_area
#print("\n\n in \n\n")
cv2.drawContours(cropped, cnt, -1, (0,255,0), 3)
#cv2.drawContours(cropped, exMatch, -1, (255,0,255), 3)
newHands.append(cnt)
areaHand = area_cnt
bx = tempx
by = tempy
'''
if bx < int(width/2)+20:
bx = int(width/2)
if bx > fWidth-int(width/2):
bx = fWidth-int(width/2)
if by < int(height/2):
by = int(height/2)
if by > fHeight-int(height/2):
by = fHeight-int(height/2)
'''
#print("Center: " + str(bx) + ", "+ str(by))
max_area = area_cnt
max_i = i
#print("accepted area" + str(area_cnt))
#cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
#peri = cv2.arcLength(cnt, True)
#approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#print("Num of sides: " + str(len(approx)))
lengthColec.append(len(approx))
#maxDef =max(defects[0].depth, defects[1].depth)
#print("convexityDefectDepth: " + str(maxDef))
#defHands.appned(cnt)
else:
handFound = False
return void
if max_i != -1:
return contour_list[max_i]
return None
except:
if max_i != -1:
return contour_list[max_i]
return None
#print("cont area average? lol: " + str(max_area))
return contour_list[max_i]
def draw_rect(frame):
rows, cols, _ = frame.shape
global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
hand_rect_one_x = np.array(
[6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 12 * rows / 30,
12 * rows / 30, 12 * rows / 30, 12 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30], dtype=np.uint32)
hand_rect_one_y = np.array(
[9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30 , 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30,
10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([160, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
def farthest_point(defects, contour, centroid):
global bx, by
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
#cx, cy = centroid
centroid
x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float)
xp = cv2.pow(cv2.subtract(x, bx), 2)
yp = cv2.pow(cv2.subtract(y, by), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def manage_image_opr(backFrame, frame, hand_hist):
global cx, cy, cropped, fWidth, fHeight
hist_mask_image = hist_masking(backFrame, frame, hand_hist)
contour_list = contours(frame, hist_mask_image)
max_cont = max_contour(frame, contour_list)
#cnt_centroid =
centroid(max_cont)
cnt_centroid = cx, cy
#cv2.circle(cropped, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)#cx, cy)#cnt_centroid)
#print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point)) #should be cnt_centroid
#cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
pointX = bx
pointY = by
height, width, __ = frame.shape
lowXBound = width*1/3
highXBound = width*2/3
lowYBound = height*1/3
highYBound = height*2/3
if (bx > lowXBound and bx < highXBound):
if (by> highYBound):
pyautogui.scroll(int((highYBound-by)/1))
elif (by < lowYBound):
pyautogui.scroll(int((lowYBound-by)/1))
elif (by > lowYBound and by < highYBound):
if (bx> highXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((highXBound-bx)/2))#('-')
pyautogui.keyUp('ctrl')
elif (bx < lowXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((lowXBound-bx)/2))#press('+')
pyautogui.keyUp('ctrl')
#coordinate = (str(far_point)).split()
#print (coordinate[0])
#print (coordinate([1]))
'''
print ("point x: ", pointX, "point y: ", pointY)
print(width, "width")
speed = int((fHeight/2 - by)/10)
#print("height", height)
print("scroll speed ", speed)
if (speed < -20 or speed > 20): #region verically for scroll
speed/=5
pyautogui.scroll(speed) #scrolls faster depending on height
else:
speed = int((fWidth/2 - bx)/10)
print("zoom speed ", speed)
if (speed > 20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('+')
pyautogui.keyUp('ctrl')
if (speed < -20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('-')
pyautogui.keyUp('ctrl')
'''
def plotHand():
global lengthColec
data = np.random.normal(0, 21, 100)
bins = np.arange(0, 21, 1)
plot.xlim([min(data)-.5, max(data)+.5])
plot.hist(data, bins=bins, alpha=0.5)
plot.title('metaData plot')
plot.xlabel('side lengths)')
plot.ylabel('Number of occurance')
plot.show()
def main():
global hand_hist, resetCount, handFound
#global background
#global have_background
global cx
global cy,bx,by
global defHands, newHands
is_hand_hist_created = False
capture = cv2.VideoCapture(0)
while capture.isOpened():
|
cv2.destroyAllWindows()
capture.release()
if __name__ == '__main__':
main()
| if resetCount <=0:
bx = int(fWidth/2)
by = int(fHeight/2)
#print("Reset data: " + str(bx) + " " + str(resetCount) + " " + str(by))
pressed_key = cv2.waitKey(1)
_, frame = capture.read()
if is_hand_hist_created==False:
txtColor = [0,255,0]
frame = cv2.flip(frame,1)
cv2.putText((frame), "Place hand over all green rectangles, then press \'z\'", (10,70),cv2.FONT_HERSHEY_SIMPLEX, .7,txtColor, 2)
frame = cv2.flip(frame,1)
if pressed_key & 0xFF == ord('z'):
#getBack(frame)
is_hand_hist_created = True
#frame = cutOutFace(frame)
hand_hist = hand_histogram(frame)
cx = int(frame.shape[0]/2)
cy = int(frame.shape[1]/2)
#if pressed_key & 0xFF == ord('r'):
#have_background = False
#getBack(frame)
if pressed_key & 0xFF == ord('e') and len(defHands)>0:
sum = 0
with open("Hands.txt", "wb") as fp:
all = newHands+ defHands
pickle.dump(all, fp)
sum += len(newHands)
#print(" tot sum " + str(sum))
break
if is_hand_hist_created:
if handFound:
resetCount = 100
else:
resetCount -=1
manage_image_opr(background, frame, hand_hist)
else:
frame = draw_rect(frame)
cv2.imshow("Live Feed", cv2.flip(rescale_frame(frame),1))
if pressed_key == 27:
#plotHand()
break | conditional_block |
handTrack1.py | import cv2
import numpy as np
import pickle
import random
from matplotlib import pyplot as plot
import pyautogui
import math
import time
#from object_tracker import startX, startY, endY, endX
hand_hist = None
traverse_point = []
total_rectangle = 16
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
have_background = False
background = None
defHands = []
newHands = []
lengthColec = []
cx = 200
cy = 200
bx = 200
by = 300
sx = 0
sy = 0
cropped = None
areaHand = 7000
startX = 0
endX = 0
width = 0
height = 0
startY = 0
endY = 0
backDivY = 10
backDivX = 17
backgroundHists = None
oldX = 0
oldY = 0
firstRun = 0
sameCount = 0
resetCount = 100
handFound = False
fWidth = 0
fHeight = 0
frameWidth = 0
frameHeight = 0
def rescale_frame(frame, wpercent=100, hpercent=100):
global fWidth, fHeight
fWidth = int(frame.shape[1] * wpercent / 100)
fHeight = int(frame.shape[0] * hpercent / 100)
##print(str(fWidth))
#print(str(fHeight))
return cv2.resize(frame, (fWidth, fHeight), interpolation=cv2.INTER_AREA)
def contours(frame, hist_mask_image):
#gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255, 0)
'''
hsv = cv2.cvtColor(hist_mask_image,cv2.COLOR_BGR2HSV)
ave = [0, 0, 0]
allPixR = []
allPixB = []
allPixG = []
for i in (0,len(hsv)-1):
for j in (0,len(hsv[0])-1):
ave += hsv[i][j]
allPixR.append(hsv[i][j][0])
allPixB.append(hsv[i][j][1])
allPixG.append(hsv[i][j][2])
ave = ave/(len(allPixR))
stdr = np.std(allPixR)+10
stdb = np.std(allPixB)+10
stdg = np.std(allPixG)+10
lower = np.array([int(ave[0]-stdr), int(ave[1]-stdb), int(ave[2]-stdg)])
upper = np.array([int(ave[0]+stdr), int(ave[1]+stdb), int(ave[2]+stdg)])
thresh = cv2.inRange(hsv, lower, upper)
'''
gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255,0)# cv2.ADAPTIVE_THRESH_MEAN_C)
#colour_mask = cv2.inRange(hsv,)
cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#hull = cv2.convexHull(cont, returnPoints=False)
#defects = cv2.convexityDefects(cont, hull)
#if (len(defects) >= 2):
#cv2.drawContours(frame, cont, -1, (0,255,0), 3)
return cont
def max_contour(frame, contour_list):
global bx, by, sx, sy, handFound
global defHands
global newHands
global lengthColec
global areaHand, cropped
global fWidth, fHeight
global resetCount
neededHands = 0
handFound = False
exMatch = None
max_i = -1
max_area = 0
min = 7000
max = 30000
tempx = 0
tempy = 0
with open("Hands.txt", "rb") as fp:
defHands = pickle.load(fp)
#print("\n\n numHands" +str(len(defHands)) + "\n\n")
#print("\n\n cont list " +str(len(defHands)) + "\n\n")
for i in range(len(contour_list)):
#time.sleep(1)
cnt = contour_list[i]
area_cnt = cv2.contourArea(cnt)
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
moment = cv2.moments(cnt)
if moment['m00'] != 0:
tempx = int(moment['m10'] / moment['m00']) + sx
tempy = int(moment['m01'] / moment['m00']) + sy
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#if (len(cnt)>4):
#print(str(cv2.fitEllipse(cnt)))
#print("Num of sides: " + str(len(approx)))
#lengthColec.append(len(approx))
center = (bx,by)
colourCenter = (10,10,10)
cv2.circle(frame, center, 10,colourCenter,2)
numMatch = 0
for j in ((defHands)):
if (cv2.matchShapes(j, cnt, 1, 0.0) < 0.02) and abs(cv2.contourArea(j)-cv2.contourArea(cnt))<7000:
numMatch+=1
exMatch = j
if numMatch > neededHands:
break
#print("matchAve so far: " + str(matchAve))
if len(defHands) > 0:
pMatch = numMatch/ len(defHands)
else:
pMatch = -1
#print("\n\n match is: " + str(matchAve) +"\n\n")
try:
#if ( len(approx == 3) or (len(approx) >= 11 and len (aprox) < 14) or (len(approx) >= 8 and len (aprox) <= 9) or (len(approx) >= 18 and len (aprox) <= 19)) and len(defects) >=2 and area_cnt > 9000 and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 200**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
#print("Center: " + str(tempx) + ", "+ str(tempy))
#print("Should be : " + str(bx) + ", "+ str(by))
#print("bad")
#cv2.circle(frame, bx,by, 10,[10,10,10]
#print("num matched: " +str(numMatch))
if numMatch >neededHands and area_cnt > 2000 and ((tempx-bx)**2 + (tempy-by)**2 < 150**2) :#and (areaHand == -1 or abs(area_cnt -areaHand) <2000) and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 100**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
handFound = True
#if numMatch > 0 :#and ((tempx-bx)**2 + (tempy-by)**2 < 100**2) and and len(approx)>=5 and len(approx) <=19 and len(defects) >=2
#area_cnt>max_area
#print("\n\n in \n\n")
cv2.drawContours(cropped, cnt, -1, (0,255,0), 3)
#cv2.drawContours(cropped, exMatch, -1, (255,0,255), 3)
newHands.append(cnt)
areaHand = area_cnt
bx = tempx
by = tempy
'''
if bx < int(width/2)+20:
bx = int(width/2)
if bx > fWidth-int(width/2):
bx = fWidth-int(width/2)
if by < int(height/2):
by = int(height/2)
if by > fHeight-int(height/2):
by = fHeight-int(height/2)
'''
#print("Center: " + str(bx) + ", "+ str(by))
max_area = area_cnt
max_i = i
#print("accepted area" + str(area_cnt))
#cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
#peri = cv2.arcLength(cnt, True)
#approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#print("Num of sides: " + str(len(approx)))
lengthColec.append(len(approx))
#maxDef =max(defects[0].depth, defects[1].depth)
#print("convexityDefectDepth: " + str(maxDef))
#defHands.appned(cnt)
else:
handFound = False
return void
if max_i != -1:
return contour_list[max_i]
return None
except:
if max_i != -1:
return contour_list[max_i]
return None
#print("cont area average? lol: " + str(max_area))
return contour_list[max_i]
def draw_rect(frame):
rows, cols, _ = frame.shape
global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
hand_rect_one_x = np.array(
[6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 12 * rows / 30,
12 * rows / 30, 12 * rows / 30, 12 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30], dtype=np.uint32)
hand_rect_one_y = np.array(
[9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30 , 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30,
10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([160, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
def farthest_point(defects, contour, centroid):
global bx, by
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
#cx, cy = centroid
centroid
x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float)
xp = cv2.pow(cv2.subtract(x, bx), 2)
yp = cv2.pow(cv2.subtract(y, by), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def manage_image_opr(backFrame, frame, hand_hist):
global cx, cy, cropped, fWidth, fHeight
hist_mask_image = hist_masking(backFrame, frame, hand_hist)
contour_list = contours(frame, hist_mask_image)
max_cont = max_contour(frame, contour_list)
#cnt_centroid =
centroid(max_cont)
cnt_centroid = cx, cy
#cv2.circle(cropped, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)#cx, cy)#cnt_centroid)
#print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point)) #should be cnt_centroid
#cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
pointX = bx
pointY = by
height, width, __ = frame.shape
lowXBound = width*1/3
highXBound = width*2/3
lowYBound = height*1/3
highYBound = height*2/3
if (bx > lowXBound and bx < highXBound):
if (by> highYBound):
pyautogui.scroll(int((highYBound-by)/1))
elif (by < lowYBound):
pyautogui.scroll(int((lowYBound-by)/1))
elif (by > lowYBound and by < highYBound):
if (bx> highXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((highXBound-bx)/2))#('-')
pyautogui.keyUp('ctrl')
elif (bx < lowXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((lowXBound-bx)/2))#press('+')
pyautogui.keyUp('ctrl')
#coordinate = (str(far_point)).split()
#print (coordinate[0])
#print (coordinate([1]))
'''
print ("point x: ", pointX, "point y: ", pointY)
print(width, "width")
speed = int((fHeight/2 - by)/10)
#print("height", height)
print("scroll speed ", speed)
if (speed < -20 or speed > 20): #region verically for scroll
speed/=5
pyautogui.scroll(speed) #scrolls faster depending on height
else:
speed = int((fWidth/2 - bx)/10)
print("zoom speed ", speed)
if (speed > 20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('+')
pyautogui.keyUp('ctrl')
if (speed < -20):
speed/=5 | pyautogui.keyDown('ctrl')
pyautogui.press('-')
pyautogui.keyUp('ctrl')
'''
def plotHand():
global lengthColec
data = np.random.normal(0, 21, 100)
bins = np.arange(0, 21, 1)
plot.xlim([min(data)-.5, max(data)+.5])
plot.hist(data, bins=bins, alpha=0.5)
plot.title('metaData plot')
plot.xlabel('side lengths)')
plot.ylabel('Number of occurance')
plot.show()
def main():
global hand_hist, resetCount, handFound
#global background
#global have_background
global cx
global cy,bx,by
global defHands, newHands
is_hand_hist_created = False
capture = cv2.VideoCapture(0)
while capture.isOpened():
if resetCount <=0:
bx = int(fWidth/2)
by = int(fHeight/2)
#print("Reset data: " + str(bx) + " " + str(resetCount) + " " + str(by))
pressed_key = cv2.waitKey(1)
_, frame = capture.read()
if is_hand_hist_created==False:
txtColor = [0,255,0]
frame = cv2.flip(frame,1)
cv2.putText((frame), "Place hand over all green rectangles, then press \'z\'", (10,70),cv2.FONT_HERSHEY_SIMPLEX, .7,txtColor, 2)
frame = cv2.flip(frame,1)
if pressed_key & 0xFF == ord('z'):
#getBack(frame)
is_hand_hist_created = True
#frame = cutOutFace(frame)
hand_hist = hand_histogram(frame)
cx = int(frame.shape[0]/2)
cy = int(frame.shape[1]/2)
#if pressed_key & 0xFF == ord('r'):
#have_background = False
#getBack(frame)
if pressed_key & 0xFF == ord('e') and len(defHands)>0:
sum = 0
with open("Hands.txt", "wb") as fp:
all = newHands+ defHands
pickle.dump(all, fp)
sum += len(newHands)
#print(" tot sum " + str(sum))
break
if is_hand_hist_created:
if handFound:
resetCount = 100
else:
resetCount -=1
manage_image_opr(background, frame, hand_hist)
else:
frame = draw_rect(frame)
cv2.imshow("Live Feed", cv2.flip(rescale_frame(frame),1))
if pressed_key == 27:
#plotHand()
break
cv2.destroyAllWindows()
capture.release()
if __name__ == '__main__':
main() | random_line_split | |
histogramMatching.py | import cv2,urllib,sys,math, sys
import numpy as np
import inspect
from matplotlib import pyplot as plt
#FUNCTIONS
#executes first part of the program. i.e to find the difference between two frames
def getDifferenceHulls(imgFrame1,imgFrame2):
#making duplicates of the above frames
imgFrame1Copy = imgFrame1.copy()
imgFrame2Copy = imgFrame2.copy()
#changing the colorspace to grayscale
imgFrame1Copy = cv2.cvtColor(imgFrame1Copy,cv2.COLOR_BGR2GRAY)
imgFrame2Copy = cv2.cvtColor(imgFrame2Copy,cv2.COLOR_BGR2GRAY)
#applying gaussianblur
imgFrame1Copy = cv2.GaussianBlur(imgFrame1Copy,(5,5),0)
imgFrame2Copy = cv2.GaussianBlur(imgFrame2Copy,(5,5),0)
#finding the difference of the two frames and thresholding the diff
imgDifference = cv2.absdiff(imgFrame1Copy,imgFrame2Copy)
_,imgThresh = cv2.threshold(imgDifference,30,255,cv2.THRESH_BINARY)
# cv2.imshow("imgThresh",imgThresh)
# morphological operations: dilation and erosion
kernel = np.ones((5,5),np.uint8)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.erode(imgThresh,kernel,iterations = 1)
#finding contours of the thresholded image
contours, hierarchy = cv2.findContours(imgThresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#finding and drawing convex hulls
hulls = [] #used to store hulls
for cnt in contours:
hulls.append(cv2.convexHull(cnt))
return hulls
#draws the rectangles on the motion detected object
def drawBlobInfoOnImage(blobs,imgFrame2Copy):
for i in range(len(blobs)):
if (blobs[i].blnStillBeingTracked == True):
rect_corner1 = (blobs[i].currentBoundingRect[0],blobs[i].currentBoundingRect[1])
rect_corner2 = (blobs[i].currentBoundingRect[0]+blobs[i].width, blobs[i].currentBoundingRect[1]+blobs[i].height)
# font settings
intFontFace = cv2.FONT_HERSHEY_SIMPLEX;
dblFontScale = blobs[i].dblCurrentDiagonalSize / 60.0
intFontThickness = int(round(dblFontScale * 1.0))
point = ((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image);
#find the distance between two points p1 and p2
def | (point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
#matching algorithm to corelate two blob objects by matching it with the expected one
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 origianal, 5
addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance)
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print 'area: '+str(self.area)+' Pos: '+str(self.centerPositions)
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1):
self.predictedNextPosition[0] = self.centerPositions[-1][0]
self.predictedNextPosition[1] = self.centerPositions[-1][1]
elif (numPositions == 2):
deltaX = self.centerPositions[1][0] - self.centerPositions[0][0]
deltaY = self.centerPositions[1][1] - self.centerPositions[0][1]
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 3):
sumOfXChanges = ((self.centerPositions[2][0] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges)/3.0))
sumOfYChanges = ((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 3.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 4) :
sumOfXChanges = ((self.centerPositions[3][0] - self.centerPositions[2][0]) * 3) + \
((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 6.0))
sumOfYChanges = ((self.centerPositions[3][1] - self.centerPositions[2][1]) * 3) + \
((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 6.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
elif (numPositions >= 5):
sumOfXChanges = ((self.centerPositions[numPositions - 1][0] - self.centerPositions[numPositions - 2][0]) * 4) + \
((self.centerPositions[numPositions - 2][0] - self.centerPositions[numPositions - 3][0]) * 3) + \
((self.centerPositions[numPositions - 3][0] - self.centerPositions[numPositions - 4][0]) * 2) + \
((self.centerPositions[numPositions - 4][0] - self.centerPositions[numPositions - 5][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 10.0));
sumOfYChanges = ((self.centerPositions[numPositions - 1][1] - self.centerPositions[numPositions - 2][1]) * 4) + \
((self.centerPositions[numPositions - 2][1] - self.centerPositions[numPositions - 3][1]) * 3) + \
((self.centerPositions[numPositions - 3][1] - self.centerPositions[numPositions - 4][1]) * 2) + \
((self.centerPositions[numPositions - 4][1] - self.centerPositions[numPositions - 5][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 10.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
else:
#should never get here
pass
def detect_point(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
print (x,y)
def findMatch(target,images):
# methods = (("Correlation", cv2.cv.CV_COMP_CORREL),("Chi-Squared", cv2.cv.CV_COMP_CHISQR),("Intersection", cv2.cv.CV_COMP_INTERSECT),("Hellinger", cv2.cv.CV_COMP_BHATTACHARYYA))
results = []
target = cv2.cvtColor(target,cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(target, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
target_histogram = cv2.normalize(hist).flatten()
for i in range(len(images)):
img = cv2.cvtColor(images[i],cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(img, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
hist = cv2.normalize(hist).flatten()
result = cv2.compareHist(target_histogram,hist,cv2.cv.CV_COMP_BHATTACHARYYA)
results.append((i,result))
results = sorted(results, key=lambda val: val[1])
return results[0]
#MAIN CODE
src = cv2.imread("database/img0.jpg")
cap = cv2.VideoCapture('video.avi') #video file object
target = cv2.imread("database/img2263.jpg")
cv2.namedWindow("target",cv2.WINDOW_NORMAL)
cv2.imshow("target",target)
#checks if the video file is valid
if cap.isOpened():
_,imgFrame1 = cap.read() #capturing the first reference frame
else:
sys.exit()
#variables used within the infinite loop
blnFirstFrame = True #is true if the frame captured is first frame
blobs = [] #holder for all the blobs
while cap.isOpened():
#capturing second reference frame
_,imgFrame2 = cap.read()
if imgFrame2 is None:
break
#obtaining convex hulls and newly captured image
hulls = getDifferenceHulls(imgFrame1,imgFrame2)
#Blob validation
currentFrameBlobs = []
for hull in hulls:
possibleBlob = Blob(hull,imgFrame2.copy())
#conditions to approximate the blobs
if (possibleBlob.area > 100 and \
possibleBlob.dblCurrentAspectRatio >= 0.2 and \
possibleBlob.dblCurrentAspectRatio <= 1.75 and \
possibleBlob.width > 20 and \
possibleBlob.height > 20 and \
possibleBlob.dblCurrentDiagonalSize > 30.0 and \
(cv2.contourArea(possibleBlob.currentContour) / float(possibleBlob.area)) > 0.40):
currentFrameBlobs.append(possibleBlob)
del possibleBlob
images = []
for i in range(len(currentFrameBlobs)):
images.append(currentFrameBlobs[i].currentROI)
match_idx, match_val = findMatch(target,images)
print 'index {}, val: {}'.format(match_idx, match_val)
if match_val < 0.8:
cv2.namedWindow("match",cv2.WINDOW_NORMAL)
cv2.imshow("match",currentFrameBlobs[match_idx].currentROI)
else:
cv2.destroyWindow("match")
#replacing the frame1 with frame2, so that newly captured frame can be stored in frame2
imgFrame1 = imgFrame2.copy()
#displaying any movement in the output screen
img_current_blobs = imgFrame2.copy()
img_all_blobs = imgFrame2.copy()
# drawing current frame blobs
drawBlobInfoOnImage(currentFrameBlobs,img_current_blobs)
#checks if the frame is the first frame of the video
# MATCHING PROCESS
if blnFirstFrame == True:
for currentFrameBlob in currentFrameBlobs:
blobs.append(currentFrameBlob)
else:
matchCurrentFrameBlobsToExistingBlobs(blobs,currentFrameBlobs)
cv2.imshow("current blobs",img_current_blobs)
# cv2.imshow("All blobs",img_all_blobs)
#flagging subsequent frames
blnFirstFrame = False
del currentFrameBlobs[:] #clearing the currentFrameBlobs to capture newly formed blobs
key_in = cv2.waitKey(0) & 0xFF
if(key_in == ord('q')):
break
#deletes all the opened windows
cap.release()
cv2.destroyAllWindows()
| distanceBetweenPoints | identifier_name |
histogramMatching.py | import cv2,urllib,sys,math, sys
import numpy as np
import inspect
from matplotlib import pyplot as plt
#FUNCTIONS
#executes first part of the program. i.e to find the difference between two frames
def getDifferenceHulls(imgFrame1,imgFrame2):
#making duplicates of the above frames
imgFrame1Copy = imgFrame1.copy()
imgFrame2Copy = imgFrame2.copy()
#changing the colorspace to grayscale
imgFrame1Copy = cv2.cvtColor(imgFrame1Copy,cv2.COLOR_BGR2GRAY)
imgFrame2Copy = cv2.cvtColor(imgFrame2Copy,cv2.COLOR_BGR2GRAY)
#applying gaussianblur
imgFrame1Copy = cv2.GaussianBlur(imgFrame1Copy,(5,5),0)
imgFrame2Copy = cv2.GaussianBlur(imgFrame2Copy,(5,5),0)
#finding the difference of the two frames and thresholding the diff
imgDifference = cv2.absdiff(imgFrame1Copy,imgFrame2Copy)
_,imgThresh = cv2.threshold(imgDifference,30,255,cv2.THRESH_BINARY)
# cv2.imshow("imgThresh",imgThresh)
# morphological operations: dilation and erosion
kernel = np.ones((5,5),np.uint8)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.erode(imgThresh,kernel,iterations = 1)
#finding contours of the thresholded image
contours, hierarchy = cv2.findContours(imgThresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#finding and drawing convex hulls
hulls = [] #used to store hulls
for cnt in contours:
hulls.append(cv2.convexHull(cnt))
return hulls
#draws the rectangles on the motion detected object
def drawBlobInfoOnImage(blobs,imgFrame2Copy):
for i in range(len(blobs)):
if (blobs[i].blnStillBeingTracked == True):
rect_corner1 = (blobs[i].currentBoundingRect[0],blobs[i].currentBoundingRect[1])
rect_corner2 = (blobs[i].currentBoundingRect[0]+blobs[i].width, blobs[i].currentBoundingRect[1]+blobs[i].height)
# font settings
intFontFace = cv2.FONT_HERSHEY_SIMPLEX;
dblFontScale = blobs[i].dblCurrentDiagonalSize / 60.0
intFontThickness = int(round(dblFontScale * 1.0))
point = ((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
|
#find the distance between two points p1 and p2
def distanceBetweenPoints(point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
#matching algorithm to corelate two blob objects by matching it with the expected one
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 origianal, 5
addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance)
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print 'area: '+str(self.area)+' Pos: '+str(self.centerPositions)
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1):
self.predictedNextPosition[0] = self.centerPositions[-1][0]
self.predictedNextPosition[1] = self.centerPositions[-1][1]
elif (numPositions == 2):
deltaX = self.centerPositions[1][0] - self.centerPositions[0][0]
deltaY = self.centerPositions[1][1] - self.centerPositions[0][1]
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 3):
sumOfXChanges = ((self.centerPositions[2][0] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges)/3.0))
sumOfYChanges = ((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 3.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 4) :
sumOfXChanges = ((self.centerPositions[3][0] - self.centerPositions[2][0]) * 3) + \
((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 6.0))
sumOfYChanges = ((self.centerPositions[3][1] - self.centerPositions[2][1]) * 3) + \
((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 6.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
elif (numPositions >= 5):
sumOfXChanges = ((self.centerPositions[numPositions - 1][0] - self.centerPositions[numPositions - 2][0]) * 4) + \
((self.centerPositions[numPositions - 2][0] - self.centerPositions[numPositions - 3][0]) * 3) + \
((self.centerPositions[numPositions - 3][0] - self.centerPositions[numPositions - 4][0]) * 2) + \
((self.centerPositions[numPositions - 4][0] - self.centerPositions[numPositions - 5][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 10.0));
sumOfYChanges = ((self.centerPositions[numPositions - 1][1] - self.centerPositions[numPositions - 2][1]) * 4) + \
((self.centerPositions[numPositions - 2][1] - self.centerPositions[numPositions - 3][1]) * 3) + \
((self.centerPositions[numPositions - 3][1] - self.centerPositions[numPositions - 4][1]) * 2) + \
((self.centerPositions[numPositions - 4][1] - self.centerPositions[numPositions - 5][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 10.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
else:
#should never get here
pass
def detect_point(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
print (x,y)
def findMatch(target,images):
# methods = (("Correlation", cv2.cv.CV_COMP_CORREL),("Chi-Squared", cv2.cv.CV_COMP_CHISQR),("Intersection", cv2.cv.CV_COMP_INTERSECT),("Hellinger", cv2.cv.CV_COMP_BHATTACHARYYA))
results = []
target = cv2.cvtColor(target,cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(target, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
target_histogram = cv2.normalize(hist).flatten()
for i in range(len(images)):
img = cv2.cvtColor(images[i],cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(img, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
hist = cv2.normalize(hist).flatten()
result = cv2.compareHist(target_histogram,hist,cv2.cv.CV_COMP_BHATTACHARYYA)
results.append((i,result))
results = sorted(results, key=lambda val: val[1])
return results[0]
#MAIN CODE
src = cv2.imread("database/img0.jpg")
cap = cv2.VideoCapture('video.avi') #video file object
target = cv2.imread("database/img2263.jpg")
cv2.namedWindow("target",cv2.WINDOW_NORMAL)
cv2.imshow("target",target)
#checks if the video file is valid
if cap.isOpened():
_,imgFrame1 = cap.read() #capturing the first reference frame
else:
sys.exit()
#variables used within the infinite loop
blnFirstFrame = True #is true if the frame captured is first frame
blobs = [] #holder for all the blobs
while cap.isOpened():
#capturing second reference frame
_,imgFrame2 = cap.read()
if imgFrame2 is None:
break
#obtaining convex hulls and newly captured image
hulls = getDifferenceHulls(imgFrame1,imgFrame2)
#Blob validation
currentFrameBlobs = []
for hull in hulls:
possibleBlob = Blob(hull,imgFrame2.copy())
#conditions to approximate the blobs
if (possibleBlob.area > 100 and \
possibleBlob.dblCurrentAspectRatio >= 0.2 and \
possibleBlob.dblCurrentAspectRatio <= 1.75 and \
possibleBlob.width > 20 and \
possibleBlob.height > 20 and \
possibleBlob.dblCurrentDiagonalSize > 30.0 and \
(cv2.contourArea(possibleBlob.currentContour) / float(possibleBlob.area)) > 0.40):
currentFrameBlobs.append(possibleBlob)
del possibleBlob
images = []
for i in range(len(currentFrameBlobs)):
images.append(currentFrameBlobs[i].currentROI)
match_idx, match_val = findMatch(target,images)
print 'index {}, val: {}'.format(match_idx, match_val)
if match_val < 0.8:
cv2.namedWindow("match",cv2.WINDOW_NORMAL)
cv2.imshow("match",currentFrameBlobs[match_idx].currentROI)
else:
cv2.destroyWindow("match")
#replacing the frame1 with frame2, so that newly captured frame can be stored in frame2
imgFrame1 = imgFrame2.copy()
#displaying any movement in the output screen
img_current_blobs = imgFrame2.copy()
img_all_blobs = imgFrame2.copy()
# drawing current frame blobs
drawBlobInfoOnImage(currentFrameBlobs,img_current_blobs)
#checks if the frame is the first frame of the video
# MATCHING PROCESS
if blnFirstFrame == True:
for currentFrameBlob in currentFrameBlobs:
blobs.append(currentFrameBlob)
else:
matchCurrentFrameBlobsToExistingBlobs(blobs,currentFrameBlobs)
cv2.imshow("current blobs",img_current_blobs)
# cv2.imshow("All blobs",img_all_blobs)
#flagging subsequent frames
blnFirstFrame = False
del currentFrameBlobs[:] #clearing the currentFrameBlobs to capture newly formed blobs
key_in = cv2.waitKey(0) & 0xFF
if(key_in == ord('q')):
break
#deletes all the opened windows
cap.release()
cv2.destroyAllWindows()
| image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image); | identifier_body |
histogramMatching.py | import cv2,urllib,sys,math, sys
import numpy as np
import inspect
from matplotlib import pyplot as plt
#FUNCTIONS
#executes first part of the program. i.e to find the difference between two frames |
#changing the colorspace to grayscale
imgFrame1Copy = cv2.cvtColor(imgFrame1Copy,cv2.COLOR_BGR2GRAY)
imgFrame2Copy = cv2.cvtColor(imgFrame2Copy,cv2.COLOR_BGR2GRAY)
#applying gaussianblur
imgFrame1Copy = cv2.GaussianBlur(imgFrame1Copy,(5,5),0)
imgFrame2Copy = cv2.GaussianBlur(imgFrame2Copy,(5,5),0)
#finding the difference of the two frames and thresholding the diff
imgDifference = cv2.absdiff(imgFrame1Copy,imgFrame2Copy)
_,imgThresh = cv2.threshold(imgDifference,30,255,cv2.THRESH_BINARY)
# cv2.imshow("imgThresh",imgThresh)
# morphological operations: dilation and erosion
kernel = np.ones((5,5),np.uint8)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.erode(imgThresh,kernel,iterations = 1)
#finding contours of the thresholded image
contours, hierarchy = cv2.findContours(imgThresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#finding and drawing convex hulls
hulls = [] #used to store hulls
for cnt in contours:
hulls.append(cv2.convexHull(cnt))
return hulls
#draws the rectangles on the motion detected object
def drawBlobInfoOnImage(blobs,imgFrame2Copy):
for i in range(len(blobs)):
if (blobs[i].blnStillBeingTracked == True):
rect_corner1 = (blobs[i].currentBoundingRect[0],blobs[i].currentBoundingRect[1])
rect_corner2 = (blobs[i].currentBoundingRect[0]+blobs[i].width, blobs[i].currentBoundingRect[1]+blobs[i].height)
# font settings
intFontFace = cv2.FONT_HERSHEY_SIMPLEX;
dblFontScale = blobs[i].dblCurrentDiagonalSize / 60.0
intFontThickness = int(round(dblFontScale * 1.0))
point = ((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image);
#find the distance between two points p1 and p2
def distanceBetweenPoints(point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
#matching algorithm to corelate two blob objects by matching it with the expected one
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 origianal, 5
addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance)
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print 'area: '+str(self.area)+' Pos: '+str(self.centerPositions)
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1):
self.predictedNextPosition[0] = self.centerPositions[-1][0]
self.predictedNextPosition[1] = self.centerPositions[-1][1]
elif (numPositions == 2):
deltaX = self.centerPositions[1][0] - self.centerPositions[0][0]
deltaY = self.centerPositions[1][1] - self.centerPositions[0][1]
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 3):
sumOfXChanges = ((self.centerPositions[2][0] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges)/3.0))
sumOfYChanges = ((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 3.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 4) :
sumOfXChanges = ((self.centerPositions[3][0] - self.centerPositions[2][0]) * 3) + \
((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 6.0))
sumOfYChanges = ((self.centerPositions[3][1] - self.centerPositions[2][1]) * 3) + \
((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 6.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
elif (numPositions >= 5):
sumOfXChanges = ((self.centerPositions[numPositions - 1][0] - self.centerPositions[numPositions - 2][0]) * 4) + \
((self.centerPositions[numPositions - 2][0] - self.centerPositions[numPositions - 3][0]) * 3) + \
((self.centerPositions[numPositions - 3][0] - self.centerPositions[numPositions - 4][0]) * 2) + \
((self.centerPositions[numPositions - 4][0] - self.centerPositions[numPositions - 5][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 10.0));
sumOfYChanges = ((self.centerPositions[numPositions - 1][1] - self.centerPositions[numPositions - 2][1]) * 4) + \
((self.centerPositions[numPositions - 2][1] - self.centerPositions[numPositions - 3][1]) * 3) + \
((self.centerPositions[numPositions - 3][1] - self.centerPositions[numPositions - 4][1]) * 2) + \
((self.centerPositions[numPositions - 4][1] - self.centerPositions[numPositions - 5][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 10.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
else:
#should never get here
pass
def detect_point(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
print (x,y)
def findMatch(target,images):
# methods = (("Correlation", cv2.cv.CV_COMP_CORREL),("Chi-Squared", cv2.cv.CV_COMP_CHISQR),("Intersection", cv2.cv.CV_COMP_INTERSECT),("Hellinger", cv2.cv.CV_COMP_BHATTACHARYYA))
results = []
target = cv2.cvtColor(target,cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(target, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
target_histogram = cv2.normalize(hist).flatten()
for i in range(len(images)):
img = cv2.cvtColor(images[i],cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(img, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
hist = cv2.normalize(hist).flatten()
result = cv2.compareHist(target_histogram,hist,cv2.cv.CV_COMP_BHATTACHARYYA)
results.append((i,result))
results = sorted(results, key=lambda val: val[1])
return results[0]
#MAIN CODE
src = cv2.imread("database/img0.jpg")
cap = cv2.VideoCapture('video.avi') #video file object
target = cv2.imread("database/img2263.jpg")
cv2.namedWindow("target",cv2.WINDOW_NORMAL)
cv2.imshow("target",target)
#checks if the video file is valid
if cap.isOpened():
_,imgFrame1 = cap.read() #capturing the first reference frame
else:
sys.exit()
#variables used within the infinite loop
blnFirstFrame = True #is true if the frame captured is first frame
blobs = [] #holder for all the blobs
while cap.isOpened():
#capturing second reference frame
_,imgFrame2 = cap.read()
if imgFrame2 is None:
break
#obtaining convex hulls and newly captured image
hulls = getDifferenceHulls(imgFrame1,imgFrame2)
#Blob validation
currentFrameBlobs = []
for hull in hulls:
possibleBlob = Blob(hull,imgFrame2.copy())
#conditions to approximate the blobs
if (possibleBlob.area > 100 and \
possibleBlob.dblCurrentAspectRatio >= 0.2 and \
possibleBlob.dblCurrentAspectRatio <= 1.75 and \
possibleBlob.width > 20 and \
possibleBlob.height > 20 and \
possibleBlob.dblCurrentDiagonalSize > 30.0 and \
(cv2.contourArea(possibleBlob.currentContour) / float(possibleBlob.area)) > 0.40):
currentFrameBlobs.append(possibleBlob)
del possibleBlob
images = []
for i in range(len(currentFrameBlobs)):
images.append(currentFrameBlobs[i].currentROI)
match_idx, match_val = findMatch(target,images)
print 'index {}, val: {}'.format(match_idx, match_val)
if match_val < 0.8:
cv2.namedWindow("match",cv2.WINDOW_NORMAL)
cv2.imshow("match",currentFrameBlobs[match_idx].currentROI)
else:
cv2.destroyWindow("match")
#replacing the frame1 with frame2, so that newly captured frame can be stored in frame2
imgFrame1 = imgFrame2.copy()
#displaying any movement in the output screen
img_current_blobs = imgFrame2.copy()
img_all_blobs = imgFrame2.copy()
# drawing current frame blobs
drawBlobInfoOnImage(currentFrameBlobs,img_current_blobs)
#checks if the frame is the first frame of the video
# MATCHING PROCESS
if blnFirstFrame == True:
for currentFrameBlob in currentFrameBlobs:
blobs.append(currentFrameBlob)
else:
matchCurrentFrameBlobsToExistingBlobs(blobs,currentFrameBlobs)
cv2.imshow("current blobs",img_current_blobs)
# cv2.imshow("All blobs",img_all_blobs)
#flagging subsequent frames
blnFirstFrame = False
del currentFrameBlobs[:] #clearing the currentFrameBlobs to capture newly formed blobs
key_in = cv2.waitKey(0) & 0xFF
if(key_in == ord('q')):
break
#deletes all the opened windows
cap.release()
cv2.destroyAllWindows() | def getDifferenceHulls(imgFrame1,imgFrame2):
#making duplicates of the above frames
imgFrame1Copy = imgFrame1.copy()
imgFrame2Copy = imgFrame2.copy() | random_line_split |
histogramMatching.py | import cv2,urllib,sys,math, sys
import numpy as np
import inspect
from matplotlib import pyplot as plt
#FUNCTIONS
#executes first part of the program. i.e to find the difference between two frames
def getDifferenceHulls(imgFrame1,imgFrame2):
#making duplicates of the above frames
imgFrame1Copy = imgFrame1.copy()
imgFrame2Copy = imgFrame2.copy()
#changing the colorspace to grayscale
imgFrame1Copy = cv2.cvtColor(imgFrame1Copy,cv2.COLOR_BGR2GRAY)
imgFrame2Copy = cv2.cvtColor(imgFrame2Copy,cv2.COLOR_BGR2GRAY)
#applying gaussianblur
imgFrame1Copy = cv2.GaussianBlur(imgFrame1Copy,(5,5),0)
imgFrame2Copy = cv2.GaussianBlur(imgFrame2Copy,(5,5),0)
#finding the difference of the two frames and thresholding the diff
imgDifference = cv2.absdiff(imgFrame1Copy,imgFrame2Copy)
_,imgThresh = cv2.threshold(imgDifference,30,255,cv2.THRESH_BINARY)
# cv2.imshow("imgThresh",imgThresh)
# morphological operations: dilation and erosion
kernel = np.ones((5,5),np.uint8)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.erode(imgThresh,kernel,iterations = 1)
#finding contours of the thresholded image
contours, hierarchy = cv2.findContours(imgThresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#finding and drawing convex hulls
hulls = [] #used to store hulls
for cnt in contours:
hulls.append(cv2.convexHull(cnt))
return hulls
#draws the rectangles on the motion detected object
def drawBlobInfoOnImage(blobs,imgFrame2Copy):
for i in range(len(blobs)):
if (blobs[i].blnStillBeingTracked == True):
rect_corner1 = (blobs[i].currentBoundingRect[0],blobs[i].currentBoundingRect[1])
rect_corner2 = (blobs[i].currentBoundingRect[0]+blobs[i].width, blobs[i].currentBoundingRect[1]+blobs[i].height)
# font settings
intFontFace = cv2.FONT_HERSHEY_SIMPLEX;
dblFontScale = blobs[i].dblCurrentDiagonalSize / 60.0
intFontThickness = int(round(dblFontScale * 1.0))
point = ((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image);
#find the distance between two points p1 and p2
def distanceBetweenPoints(point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
#matching algorithm to corelate two blob objects by matching it with the expected one
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 origianal, 5
|
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print 'area: '+str(self.area)+' Pos: '+str(self.centerPositions)
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1):
self.predictedNextPosition[0] = self.centerPositions[-1][0]
self.predictedNextPosition[1] = self.centerPositions[-1][1]
elif (numPositions == 2):
deltaX = self.centerPositions[1][0] - self.centerPositions[0][0]
deltaY = self.centerPositions[1][1] - self.centerPositions[0][1]
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 3):
sumOfXChanges = ((self.centerPositions[2][0] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges)/3.0))
sumOfYChanges = ((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 3.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 4) :
sumOfXChanges = ((self.centerPositions[3][0] - self.centerPositions[2][0]) * 3) + \
((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 6.0))
sumOfYChanges = ((self.centerPositions[3][1] - self.centerPositions[2][1]) * 3) + \
((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 6.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
elif (numPositions >= 5):
sumOfXChanges = ((self.centerPositions[numPositions - 1][0] - self.centerPositions[numPositions - 2][0]) * 4) + \
((self.centerPositions[numPositions - 2][0] - self.centerPositions[numPositions - 3][0]) * 3) + \
((self.centerPositions[numPositions - 3][0] - self.centerPositions[numPositions - 4][0]) * 2) + \
((self.centerPositions[numPositions - 4][0] - self.centerPositions[numPositions - 5][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 10.0));
sumOfYChanges = ((self.centerPositions[numPositions - 1][1] - self.centerPositions[numPositions - 2][1]) * 4) + \
((self.centerPositions[numPositions - 2][1] - self.centerPositions[numPositions - 3][1]) * 3) + \
((self.centerPositions[numPositions - 3][1] - self.centerPositions[numPositions - 4][1]) * 2) + \
((self.centerPositions[numPositions - 4][1] - self.centerPositions[numPositions - 5][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 10.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY;
else:
#should never get here
pass
def detect_point(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
print (x,y)
def findMatch(target,images):
# methods = (("Correlation", cv2.cv.CV_COMP_CORREL),("Chi-Squared", cv2.cv.CV_COMP_CHISQR),("Intersection", cv2.cv.CV_COMP_INTERSECT),("Hellinger", cv2.cv.CV_COMP_BHATTACHARYYA))
results = []
target = cv2.cvtColor(target,cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(target, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
target_histogram = cv2.normalize(hist).flatten()
for i in range(len(images)):
img = cv2.cvtColor(images[i],cv2.COLOR_BGR2RGB)
hist = cv2.calcHist(img, [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
hist = cv2.normalize(hist).flatten()
result = cv2.compareHist(target_histogram,hist,cv2.cv.CV_COMP_BHATTACHARYYA)
results.append((i,result))
results = sorted(results, key=lambda val: val[1])
return results[0]
#MAIN CODE
src = cv2.imread("database/img0.jpg")
cap = cv2.VideoCapture('video.avi') #video file object
target = cv2.imread("database/img2263.jpg")
cv2.namedWindow("target",cv2.WINDOW_NORMAL)
cv2.imshow("target",target)
#checks if the video file is valid
if cap.isOpened():
_,imgFrame1 = cap.read() #capturing the first reference frame
else:
sys.exit()
#variables used within the infinite loop
blnFirstFrame = True #is true if the frame captured is first frame
blobs = [] #holder for all the blobs
while cap.isOpened():
#capturing second reference frame
_,imgFrame2 = cap.read()
if imgFrame2 is None:
break
#obtaining convex hulls and newly captured image
hulls = getDifferenceHulls(imgFrame1,imgFrame2)
#Blob validation
currentFrameBlobs = []
for hull in hulls:
possibleBlob = Blob(hull,imgFrame2.copy())
#conditions to approximate the blobs
if (possibleBlob.area > 100 and \
possibleBlob.dblCurrentAspectRatio >= 0.2 and \
possibleBlob.dblCurrentAspectRatio <= 1.75 and \
possibleBlob.width > 20 and \
possibleBlob.height > 20 and \
possibleBlob.dblCurrentDiagonalSize > 30.0 and \
(cv2.contourArea(possibleBlob.currentContour) / float(possibleBlob.area)) > 0.40):
currentFrameBlobs.append(possibleBlob)
del possibleBlob
images = []
for i in range(len(currentFrameBlobs)):
images.append(currentFrameBlobs[i].currentROI)
match_idx, match_val = findMatch(target,images)
print 'index {}, val: {}'.format(match_idx, match_val)
if match_val < 0.8:
cv2.namedWindow("match",cv2.WINDOW_NORMAL)
cv2.imshow("match",currentFrameBlobs[match_idx].currentROI)
else:
cv2.destroyWindow("match")
#replacing the frame1 with frame2, so that newly captured frame can be stored in frame2
imgFrame1 = imgFrame2.copy()
#displaying any movement in the output screen
img_current_blobs = imgFrame2.copy()
img_all_blobs = imgFrame2.copy()
# drawing current frame blobs
drawBlobInfoOnImage(currentFrameBlobs,img_current_blobs)
#checks if the frame is the first frame of the video
# MATCHING PROCESS
if blnFirstFrame == True:
for currentFrameBlob in currentFrameBlobs:
blobs.append(currentFrameBlob)
else:
matchCurrentFrameBlobsToExistingBlobs(blobs,currentFrameBlobs)
cv2.imshow("current blobs",img_current_blobs)
# cv2.imshow("All blobs",img_all_blobs)
#flagging subsequent frames
blnFirstFrame = False
del currentFrameBlobs[:] #clearing the currentFrameBlobs to capture newly formed blobs
key_in = cv2.waitKey(0) & 0xFF
if(key_in == ord('q')):
break
#deletes all the opened windows
cap.release()
cv2.destroyAllWindows()
| addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance) | conditional_block |
chat.js | var chat_croomid = 0;
var chat_croomsign = 0;
var chat_availability = 0;
var chat_lastmsgtime = 0;
var chat_lastlineid = 0;
function setCookie(c_name,value,exdays)
{
var exdate=new Date();
exdate.setDate(exdate.getDate() + exdays);
var c_value=escape(value) + ((exdays==null) ? "" : "; expires="+exdate.toUTCString());
document.cookie=c_name + "=" + c_value;
}
function getCookie(c_name)
{
var i,x,y,ARRcookies=document.cookie.split(";");
for (i=0;i<ARRcookies.length;i++)
{
x=ARRcookies[i].substr(0,ARRcookies[i].indexOf("="));
y=ARRcookies[i].substr(ARRcookies[i].indexOf("=")+1);
x=x.replace(/^\s+|\s+$/g,"");
if (x==c_name)
|
}
}
function chat_show()
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
chat_resize();
post_center();
}
function chat_hide()
{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
chat_resize();
post_center();
}
function chat_isopen()
{
if(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function chatc_create(users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
2 - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var loe = "";
cd.lines.sort(function(a,b) { return parseFloat(a.time) - parseFloat(b.time) } );
i=0;
if(cadd && chat_lastlineid == cd.lines[0].id) i = 1;
for(; i<linecount; i++)
{
clines = "<p>" + cd.lines[i].line + "</p>"
if(cd.lines[i].tid % 2)
loe = "chatlineodd";
else
loe = "chatlineeven";
ltext += "<div class='chatbox_ci " + loe + "'><div class='chatbox_pic' onmouseover='vusr(this, \"" + cd.lines[i].user + "\");' title='" + chatjson_findusername(cd.users, cd.lines[i].user) + "'><img src='data/u" + cd.lines[i].userloc + "/dp/3.jpg' onerror='failsafe_img(this, 3);'/></div><div class='chatbox_ct'><div id='chatbox_df" + cd.lines[i].tid + "'" + chat_format(clines) + "</div><abbr class='synctime' data-ts='" + cd.lines[i].time + "' data-mode='0'>ddddd</abbr></div></div>";
}
if(!cadd)
{
$("chattext").innerHTML = ltext;
}else{
if(chat_lastlineid == cd.lines[0].tid)
{
clines = "<p>" + cd.lines[0].line + "</p>"
var o = $("chatbox_df" + chat_lastlineid);
if(o) o.innerHTML = chat_format(clines);
else $("chattext").innerHTML += ltext;
}else{
$("chattext").innerHTML += ltext;
}
}
var ct = $("chattext");
synctime_set(ct);
//ct.scrollTop = ct.scrollHeight;
scroll_banimate(ct, 1);
chat_lastlineid = cd.lines[linecount-1].tid;
}
function chatjson_findusername(ju, id)
{
var usercount = ju.length;
for(i=0; i<usercount; i++)
{
if(ju[i].id == id) return ju[i].name;
}
return "You";
}
function chat_reload()
{
var ods = $("divdataset1");
chat_croomid = ods.getAttribute('data-ecroomid');
chat_croomsign = ods.getAttribute('data-eroomsign');
chat_availability = ods.getAttribute('data-eavailability');
chat_lastmsgtime = ods.getAttribute('data-elastmsgtime');
}
function chat_get_friendsgrid()
{
ajax_post("php/tasks/friendsearch.php?av=1&lm=16", function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chat_refresh_friendsgrid(jm);
}
}});
}
function chat_refresh_friendsgrid(jd)
{
if(!jd || !jd.users) return;
var dtext = "<div class='chat_available_line'>";
var j=0;
var dctext = "";
for(var i=0; i<jd.users.length; i++)
{
if(j >= 8)
{
dtext += "</div><div class='chat_available_line'>";
j = 0;
}
dctext = "<div class='chat_available_pic' style='cursor: pointer;' onclick=\"chatc_call('" + jd.users[i].uid + "');\"><img src='data/u" + jd.users[i].lid + "/dp/2.jpg' onerror='failsafe_img(this, 3);'width='33px' onmouseover=\"vusr(this, '" + jd.users[i].uid + "');\"/></div>";
dtext += dctext;
j++;
}
dtext += "</div>";
$("chat_friendgrid").innerHTML = dtext;
chat_resize();
} | {
return unescape(y);
} | conditional_block |
chat.js | var chat_croomid = 0;
var chat_croomsign = 0;
var chat_availability = 0;
var chat_lastmsgtime = 0;
var chat_lastlineid = 0;
function setCookie(c_name,value,exdays)
{
var exdate=new Date();
exdate.setDate(exdate.getDate() + exdays);
var c_value=escape(value) + ((exdays==null) ? "" : "; expires="+exdate.toUTCString());
document.cookie=c_name + "=" + c_value;
}
function getCookie(c_name)
{
var i,x,y,ARRcookies=document.cookie.split(";");
for (i=0;i<ARRcookies.length;i++)
{
x=ARRcookies[i].substr(0,ARRcookies[i].indexOf("="));
y=ARRcookies[i].substr(ARRcookies[i].indexOf("=")+1);
x=x.replace(/^\s+|\s+$/g,"");
if (x==c_name)
{
return unescape(y);
}
}
}
function chat_show()
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
chat_resize();
post_center();
}
function chat_hide()
|
function chat_isopen()
{
if(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function chatc_create(users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
2 - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var loe = "";
cd.lines.sort(function(a,b) { return parseFloat(a.time) - parseFloat(b.time) } );
i=0;
if(cadd && chat_lastlineid == cd.lines[0].id) i = 1;
for(; i<linecount; i++)
{
clines = "<p>" + cd.lines[i].line + "</p>"
if(cd.lines[i].tid % 2)
loe = "chatlineodd";
else
loe = "chatlineeven";
ltext += "<div class='chatbox_ci " + loe + "'><div class='chatbox_pic' onmouseover='vusr(this, \"" + cd.lines[i].user + "\");' title='" + chatjson_findusername(cd.users, cd.lines[i].user) + "'><img src='data/u" + cd.lines[i].userloc + "/dp/3.jpg' onerror='failsafe_img(this, 3);'/></div><div class='chatbox_ct'><div id='chatbox_df" + cd.lines[i].tid + "'" + chat_format(clines) + "</div><abbr class='synctime' data-ts='" + cd.lines[i].time + "' data-mode='0'>ddddd</abbr></div></div>";
}
if(!cadd)
{
$("chattext").innerHTML = ltext;
}else{
if(chat_lastlineid == cd.lines[0].tid)
{
clines = "<p>" + cd.lines[0].line + "</p>"
var o = $("chatbox_df" + chat_lastlineid);
if(o) o.innerHTML = chat_format(clines);
else $("chattext").innerHTML += ltext;
}else{
$("chattext").innerHTML += ltext;
}
}
var ct = $("chattext");
synctime_set(ct);
//ct.scrollTop = ct.scrollHeight;
scroll_banimate(ct, 1);
chat_lastlineid = cd.lines[linecount-1].tid;
}
function chatjson_findusername(ju, id)
{
var usercount = ju.length;
for(i=0; i<usercount; i++)
{
if(ju[i].id == id) return ju[i].name;
}
return "You";
}
function chat_reload()
{
var ods = $("divdataset1");
chat_croomid = ods.getAttribute('data-ecroomid');
chat_croomsign = ods.getAttribute('data-eroomsign');
chat_availability = ods.getAttribute('data-eavailability');
chat_lastmsgtime = ods.getAttribute('data-elastmsgtime');
}
function chat_get_friendsgrid()
{
ajax_post("php/tasks/friendsearch.php?av=1&lm=16", function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chat_refresh_friendsgrid(jm);
}
}});
}
function chat_refresh_friendsgrid(jd)
{
if(!jd || !jd.users) return;
var dtext = "<div class='chat_available_line'>";
var j=0;
var dctext = "";
for(var i=0; i<jd.users.length; i++)
{
if(j >= 8)
{
dtext += "</div><div class='chat_available_line'>";
j = 0;
}
dctext = "<div class='chat_available_pic' style='cursor: pointer;' onclick=\"chatc_call('" + jd.users[i].uid + "');\"><img src='data/u" + jd.users[i].lid + "/dp/2.jpg' onerror='failsafe_img(this, 3);'width='33px' onmouseover=\"vusr(this, '" + jd.users[i].uid + "');\"/></div>";
dtext += dctext;
j++;
}
dtext += "</div>";
$("chat_friendgrid").innerHTML = dtext;
chat_resize();
} | {
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
chat_resize();
post_center();
} | identifier_body |
chat.js | var chat_croomid = 0;
var chat_croomsign = 0;
var chat_availability = 0;
var chat_lastmsgtime = 0;
var chat_lastlineid = 0;
function setCookie(c_name,value,exdays)
{
var exdate=new Date();
exdate.setDate(exdate.getDate() + exdays);
var c_value=escape(value) + ((exdays==null) ? "" : "; expires="+exdate.toUTCString());
document.cookie=c_name + "=" + c_value;
}
function getCookie(c_name)
{
var i,x,y,ARRcookies=document.cookie.split(";");
for (i=0;i<ARRcookies.length;i++)
{
x=ARRcookies[i].substr(0,ARRcookies[i].indexOf("="));
y=ARRcookies[i].substr(ARRcookies[i].indexOf("=")+1);
x=x.replace(/^\s+|\s+$/g,"");
if (x==c_name)
{
return unescape(y);
}
}
}
function chat_show()
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
chat_resize();
post_center();
}
function chat_hide()
{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
chat_resize();
post_center();
}
function chat_isopen()
{
if(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function | (users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
2 - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var loe = "";
cd.lines.sort(function(a,b) { return parseFloat(a.time) - parseFloat(b.time) } );
i=0;
if(cadd && chat_lastlineid == cd.lines[0].id) i = 1;
for(; i<linecount; i++)
{
clines = "<p>" + cd.lines[i].line + "</p>"
if(cd.lines[i].tid % 2)
loe = "chatlineodd";
else
loe = "chatlineeven";
ltext += "<div class='chatbox_ci " + loe + "'><div class='chatbox_pic' onmouseover='vusr(this, \"" + cd.lines[i].user + "\");' title='" + chatjson_findusername(cd.users, cd.lines[i].user) + "'><img src='data/u" + cd.lines[i].userloc + "/dp/3.jpg' onerror='failsafe_img(this, 3);'/></div><div class='chatbox_ct'><div id='chatbox_df" + cd.lines[i].tid + "'" + chat_format(clines) + "</div><abbr class='synctime' data-ts='" + cd.lines[i].time + "' data-mode='0'>ddddd</abbr></div></div>";
}
if(!cadd)
{
$("chattext").innerHTML = ltext;
}else{
if(chat_lastlineid == cd.lines[0].tid)
{
clines = "<p>" + cd.lines[0].line + "</p>"
var o = $("chatbox_df" + chat_lastlineid);
if(o) o.innerHTML = chat_format(clines);
else $("chattext").innerHTML += ltext;
}else{
$("chattext").innerHTML += ltext;
}
}
var ct = $("chattext");
synctime_set(ct);
//ct.scrollTop = ct.scrollHeight;
scroll_banimate(ct, 1);
chat_lastlineid = cd.lines[linecount-1].tid;
}
function chatjson_findusername(ju, id)
{
var usercount = ju.length;
for(i=0; i<usercount; i++)
{
if(ju[i].id == id) return ju[i].name;
}
return "You";
}
function chat_reload()
{
var ods = $("divdataset1");
chat_croomid = ods.getAttribute('data-ecroomid');
chat_croomsign = ods.getAttribute('data-eroomsign');
chat_availability = ods.getAttribute('data-eavailability');
chat_lastmsgtime = ods.getAttribute('data-elastmsgtime');
}
function chat_get_friendsgrid()
{
ajax_post("php/tasks/friendsearch.php?av=1&lm=16", function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chat_refresh_friendsgrid(jm);
}
}});
}
function chat_refresh_friendsgrid(jd)
{
if(!jd || !jd.users) return;
var dtext = "<div class='chat_available_line'>";
var j=0;
var dctext = "";
for(var i=0; i<jd.users.length; i++)
{
if(j >= 8)
{
dtext += "</div><div class='chat_available_line'>";
j = 0;
}
dctext = "<div class='chat_available_pic' style='cursor: pointer;' onclick=\"chatc_call('" + jd.users[i].uid + "');\"><img src='data/u" + jd.users[i].lid + "/dp/2.jpg' onerror='failsafe_img(this, 3);'width='33px' onmouseover=\"vusr(this, '" + jd.users[i].uid + "');\"/></div>";
dtext += dctext;
j++;
}
dtext += "</div>";
$("chat_friendgrid").innerHTML = dtext;
chat_resize();
} | chatc_create | identifier_name |
chat.js | var chat_croomid = 0;
var chat_croomsign = 0;
var chat_availability = 0;
var chat_lastmsgtime = 0;
var chat_lastlineid = 0;
function setCookie(c_name,value,exdays)
{
var exdate=new Date();
exdate.setDate(exdate.getDate() + exdays);
var c_value=escape(value) + ((exdays==null) ? "" : "; expires="+exdate.toUTCString());
document.cookie=c_name + "=" + c_value;
}
function getCookie(c_name)
{
var i,x,y,ARRcookies=document.cookie.split(";");
for (i=0;i<ARRcookies.length;i++)
{
x=ARRcookies[i].substr(0,ARRcookies[i].indexOf("="));
y=ARRcookies[i].substr(ARRcookies[i].indexOf("=")+1);
x=x.replace(/^\s+|\s+$/g,"");
if (x==c_name)
{
return unescape(y);
}
}
}
function chat_show()
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
chat_resize();
post_center();
}
function chat_hide()
{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
| chat_resize();
post_center();
}
function chat_isopen()
{
if(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function chatc_create(users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
2 - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var loe = "";
cd.lines.sort(function(a,b) { return parseFloat(a.time) - parseFloat(b.time) } );
i=0;
if(cadd && chat_lastlineid == cd.lines[0].id) i = 1;
for(; i<linecount; i++)
{
clines = "<p>" + cd.lines[i].line + "</p>"
if(cd.lines[i].tid % 2)
loe = "chatlineodd";
else
loe = "chatlineeven";
ltext += "<div class='chatbox_ci " + loe + "'><div class='chatbox_pic' onmouseover='vusr(this, \"" + cd.lines[i].user + "\");' title='" + chatjson_findusername(cd.users, cd.lines[i].user) + "'><img src='data/u" + cd.lines[i].userloc + "/dp/3.jpg' onerror='failsafe_img(this, 3);'/></div><div class='chatbox_ct'><div id='chatbox_df" + cd.lines[i].tid + "'" + chat_format(clines) + "</div><abbr class='synctime' data-ts='" + cd.lines[i].time + "' data-mode='0'>ddddd</abbr></div></div>";
}
if(!cadd)
{
$("chattext").innerHTML = ltext;
}else{
if(chat_lastlineid == cd.lines[0].tid)
{
clines = "<p>" + cd.lines[0].line + "</p>"
var o = $("chatbox_df" + chat_lastlineid);
if(o) o.innerHTML = chat_format(clines);
else $("chattext").innerHTML += ltext;
}else{
$("chattext").innerHTML += ltext;
}
}
var ct = $("chattext");
synctime_set(ct);
//ct.scrollTop = ct.scrollHeight;
scroll_banimate(ct, 1);
chat_lastlineid = cd.lines[linecount-1].tid;
}
function chatjson_findusername(ju, id)
{
var usercount = ju.length;
for(i=0; i<usercount; i++)
{
if(ju[i].id == id) return ju[i].name;
}
return "You";
}
function chat_reload()
{
var ods = $("divdataset1");
chat_croomid = ods.getAttribute('data-ecroomid');
chat_croomsign = ods.getAttribute('data-eroomsign');
chat_availability = ods.getAttribute('data-eavailability');
chat_lastmsgtime = ods.getAttribute('data-elastmsgtime');
}
function chat_get_friendsgrid()
{
ajax_post("php/tasks/friendsearch.php?av=1&lm=16", function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chat_refresh_friendsgrid(jm);
}
}});
}
function chat_refresh_friendsgrid(jd)
{
if(!jd || !jd.users) return;
var dtext = "<div class='chat_available_line'>";
var j=0;
var dctext = "";
for(var i=0; i<jd.users.length; i++)
{
if(j >= 8)
{
dtext += "</div><div class='chat_available_line'>";
j = 0;
}
dctext = "<div class='chat_available_pic' style='cursor: pointer;' onclick=\"chatc_call('" + jd.users[i].uid + "');\"><img src='data/u" + jd.users[i].lid + "/dp/2.jpg' onerror='failsafe_img(this, 3);'width='33px' onmouseover=\"vusr(this, '" + jd.users[i].uid + "');\"/></div>";
dtext += dctext;
j++;
}
dtext += "</div>";
$("chat_friendgrid").innerHTML = dtext;
chat_resize();
} | setCookie("cards_chatbox", "0", 365);
| random_line_split |
canvas_play.js | 'use strict'
let ws, x, y, w, h, ID, stopRendering = false;
class Time {
constructor() {
this.one = 1800;
}
updateTime(newTime) {
clearInterval(this.interval);
this.nw = newTime.slice(0, -1);
setTimeout(() =>
this.interval = setInterval(() => this.upOne(), this.one * 1000),
(Math.floor(this.nw[0] / this.one) + 1) * this.one - this.nw[0]);
}
upOne() {
if (this.nw[4] < 3) this.nw[4]++
else {
if (this.nw[3] < 29) this.nw[3]++
else {
if (this.nw[2] < 11) this.nw[2]++
else {
this.nw[1]++;
this.nw[2] = 0;
}
this.nw[3] = 0;
}
this.nw[4] = 0;
}
}
getSeason() {
switch (this.nw[2]) {
case 2: case 3: case 4: return 0;
case 5: case 6: case 7: return 1;
case 8: case 9: case 10: return 2;
case 11: case 0: case 1: return 3;
}
}
getMoonPhase() {
const phase = this.nw[3] / 29;
if (phase == 0) return 0;
if (phase < 0.14) return 1;
if (phase < 0.48) return 2;
if (phase < 0.52) return 3;
if (phase < 0.96) return 4;
return 5;
}
getDateAsString() {
let string = (() => {
let s = this.nw[2];
s = s <= 2 ? s :
s <= 5 ? s - 3 :
s <= 8 ? s - 6 : s - 9;
s /= 3;
if (s < 0.1) return 'Начало';
if (s < 0.5) return 'Первая половина';
if (s < 0.9) return 'Вторая половина';
return 'Конец'
})();
switch (this.getSeason()) {
case 0: string += ' сезона Голых Деревьев, '; break;
case 1: string += ' сезона Юных Листьев, '; break;
case 2: string += ' сезона Зелёных Деревьев, '; break;
case 3: string += ' листопада, '; break;
}
switch (this.getMoonPhase()) {
case 0: return string += 'новолуние';
case 1: return string += 'растущая луна';
case 2: return string += 'первая половина луны';
case 3: return string += 'половина луны';
case 4: return string += 'вторая половина луны';
case 5: return string += 'полнолуние';
}
}
}
class AdditionToCanvas {
rotateHorizontally(img) {
const d = img.data;
for (let i = 0; i < img.height; i++) {
for (let j = 0; j < img.width / 2; j++) {
const p0 = i * img.width * 4 + j * 4,
p1 = i * img.width * 4 + (img.width - j) * 4;
for (let k = 0; k < 4; k++) {
const t = d[p0 + k];
d[p0 + k] = d[p1 + k];
d[p1 + k] = t;
}
}
}
return img;
}
}
class Game {
constructor() {
//super();
this.host = location.origin.replace(/^http/, 'ws');
this.time = new Time();
this.layer = [
document.getElementById('zero-layer').getContext('2d'),
document.getElementById('first-layer').getContext('2d'),
document.getElementById('under-layer').getContext('2d'),
document.getElementById('tmp').getContext('2d')
]
this.canvas = new AdditionToCanvas();
this.scale = 1;
this.nowl = 0;
this.area = {
type: 0,
react: (X, Y) => {
send(102, [X / x, (Y - h * 0.55) / y]);
}
}
this.colorOfInterface = {
main: '#9b6c40',
text: '#000000'
}
this.all = [];
this.cats = new Map();
this.spaces = new Set([this.area]);
}
cookSpace(data, f = a => a) {
data.react = f;
this.spaces.add(data);
return data;
}
deleteSpace(link) {
return this.spaces.delete(link);
}
computeVector(A, B) {
return [
B[0] - A[0],
B[1] - A[1],
Math.round(Math.sqrt(Math.pow(B[0] - A[0], 2) + Math.pow(B[1] - A[1], 2)))
];
}
serveText(text, maxTextLength = 1) {
maxTextLength = Math.floor(maxTextLength * 40);
const result = [''], max = Math.floor(maxTextLength / 2);
text = text.match(/\s*[\S]+\s*/g);
for (let i = 0; i < text.length; i++) {
const s = text[i];
if (s.length > max) {
text.splice(i + 1, 0, s.slice(0, max), s.slice(max + 1));
continue;
}
if (result[result.length - 1].length + s.length > maxTextLength) {
result.push(s);
} else result[result.length - 1] += s;
}
return result;
}
initCanvas() {
w = document.documentElement.clientWidth * this.scale;
h = document.documentElement.clientHeight * this.scale;
let zero = this.layer[0],
first = this.layer[1],
under = this.layer[2],
totalHight = h * 0.85; //0.3 + 0.55
zero.canvas.width = w;
zero.canvas.height = totalHight;
first.canvas.width = w;
first.canvas.height = totalHight;
under.canvas.width = w;
under.canvas.height = totalHight;
this.area.range = [0,h*0.55,w,totalHight]
x = w / 160;
y = h * 0.3 / 27;
}
async render() {
console.time('render')
if (stopRendering) return requestAnimationFrame(game.render);
//при изменениее game.scale:
//1) перерисовать under-layer
//2) пересчитать параметры
const l = game.layer[game.nowl],
areaHight = h * 0.55;
game.all.sort((a,b) => a.place[1] - b.place[1]);
game.all.forEach(i => {
const placeX = Math.floor(i.place[0] * x),
placeY = Math.floor(areaHight + i.place[1] * y);
l.drawImage(i.skin, i.sprite, i.dir ? 0 : i.h,
i.w, i.h, placeX - i.out,
placeY - i.hsc,
i.wsc, i.hsc);
// if (i.paintedMsg)
// l.drawImage(i.paintedMsg, Math.floor(i.place[0] * x - i.paintedMsg.width/2/*- i.out*/),
// Math.floor(areaHight + i.place[1] * y - i.hsc - i.paintedMsg.height));
if (i.msg) {
for (let j = 0, p = 0; j < i.msg.length; j++) {
console.log(i.msg[j])
l.drawImage(i.msg[j].image, placeX, placeY - i.hsc - p);
p += i.msg[j].height;
}
}
});
game.nowl ^= 1;
game.layer[game.nowl].clearRect(0, 0, w, h);
requestAnimationFrame(game.render);
stopRendering = true;
console.timeEnd('render')
}
space(s = {}, X, Y) {
switch (s.type) {
case 0:
s = s.range;
if (X >= s[0] && X <= s[2] &&
Y >= s[1] && Y <= s[3]) return true;
break;
}
}
openConnection() {
//if a connection already exists, try to close the connection
if (ws) ws.close();
ws = new WebSocket(this.host, 'play');
ws.onopen = () => {
get('/getCookie').then(res => {
if (res.code == 1) send(100, res.headers)
else console.log('Ошибка авторизации');
});
}
ws.onmessage = e => {
const {code, msg} = JSON.parse(e.data);
console.log({code,msg});
switch (code) {
case 100:
game.time.updateTime(msg.time);
game.location = new Location(msg.loc);
ID = msg.id;
requestAnimationFrame(game.render);
break;
case 101:
game.cats.get(msg.id).addMsg(msg.text);
break;
case 104:
game.cats.get(msg.id).walk(msg.msg);
break;
case 105:
game.location.clear();
game.location.fill(msg.fill);
break;
case 107:
if (game.cats.get(msg.id)) return;
new Cat(msg);
break;
case 108:
game.time.updateTime(msg);
break;
}
}
}
}
var game = new Game();
class Animal {
constructor(raw = {}) {
this.msg = [];
}
roundedRect(c, x, y, width, height, radius, t) {
c.beginPath();
c.moveTo(x,y+radius);
c.lineTo(x,y+height-radius);
c.quadraticCurveTo(x,y+height,x+radius,y+height);
if (t) {
c.lineTo(x+width/2-4,y+height);
c.lineTo(x+width/2,y+height+7);
c.lineTo(x+width/2+4,y+height);
}
c.lineTo(x+width-radius,y+height);
c.quadraticCurveTo(x+width,y+height,x+width,y+height-radius);
c.lineTo(x+width,y+radius);
c.quadraticCurveTo(x+width,y,x+width-radius,y);
c.lineTo(x+radius,y);
c.quadraticCurveTo(x,y,x,y+radius);
c.stroke();
}
deleteMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
if (i == 9) {
this.msg.pop();
link.killed = true;
} else link.sprite[0] = link.width * i;
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.deleteMsg(this.msg[this.msg.length - 1]);
const m = {}
m.text = game.serveText(msg, this.size + this.customTextSize);
m.sprite = [];
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.deleteMsg(m);
}, 10000);
}
renderMsg(m) {
/*
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
console.time('draw')
const tmp = game.layer[3],
k = 0.8 + 0.2 * this.size + this.customTextSize,
interval = 12 * k,
rectHeight = m.text.length * interval + 4 * k,
rectWidth = m.text[0].length * 9 * k,
height = rectHeight + 4,
width = rectWidth + 4,
canvasHeight = height * 2 + 7,
last = width * 6;
console.log(m.text)
tmp.canvas.height = canvasHeight;
tmp.canvas.width = last;
for (let i = 0; i < 2; i++) {
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp, 2, (i ? 2 : 9 + height), rectWidth, rectHeight, 5, i);
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b, 5, 13 * k + interval * j + (i ? 0 : 7 + height), rectWidth));
}
for (let i = width, alpha = 0.7; i < last; i += width, alpha *= 0.6) {
const result = tmp.getImageData(0, 0, width, canvasHeight),
r = result.data;
for (let j = 3; j < r.length; j += 4) {
r[j] *= alpha;
}
tmp.putImageData(result, i, 0);
}
console.timeEnd('draw')
return tmp.canvas;
}
/*
extMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
console.log(link.alpha + ': ' + link.text[0]);
if (i == 9) {
this.msg.pop();
link.killed = true;
} else {
link.alpha = 1.0 - i * i / 81;
this.shiftAlphaMsg(link);
}
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.extMsg(this.msg[this.msg.length-1]);
const m = {}
//m.alpha = 1.0;
m.text = game.serveText(msg);
this.drawMsg(m, 1);
if (this.msg[0]) this.drawMsg(this.msg[0]);
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.extMsg(m);
}, 10000);
//this.drawMsg();
} */ /*
shiftAlphaMsg(m) {
const tmp = game.layer[3];
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.globalAlpha = m.alpha;
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
m.image = image;
stopRendering = false;
}
} */
/*
drawMsg(m, newMsg) {
console.log(m)
if (newMsg) m.k = 0.8 + 0.2 * this.size + this.customTextSize;
const tmp = game.layer[3],
interval = 12 * m.k;
if (newMsg) {
m.rectHeight = m.text.length * interval + 3,
m.rectWidth = m.text[0].length * 9 * m.k;
}
m.height = m.rectHeight + (newMsg ? 11 : 4);
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,m.rectWidth,m.rectHeight,5,(newMsg ? true : false));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
/*
drawMsg() {
if (this.msg.length == 0) {
delete this.paintedMsg;
stopRendering = false;
return;
}
console.time('draw')
const k = 0.8 + 0.2 * this.size + this.customTextSize,
tmp = game.layer[3];
let interval = 12 * k,
s = 0,
largestWidth = 14;
this.msg.forEach((a, i) => {
const rectHeight = a.text.length * interval + 3,
rectWidth = a.text[0].length * 9 * k;
a.height = rectHeight + 4 + (i ? 0 : 7);
s += a.height;
tmp.canvas.height = a.height;
tmp.canvas.width = 4 + rectWidth;
if (tmp.canvas.width > largestWidth) largestWidth = tmp.canvas.width;
tmp.globalAlpha = a.alpha;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,rectWidth,rectHeight,5,(i ? undefined : true));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
a.text.forEach((b, j) => tmp.fillText(b,5,13*k+interval*j));
a.image = tmp.getImageData(0,0,4 + rectWidth,a.height);
});
tmp.canvas.height = s;
tmp.canvas.width = largestWidth;
for (let i = 0; i < this.msg.length; i++) {
s -= this.msg[i].height;
tmp.putImageData(this.msg[i].image,(largestWidth-this.msg[i].image.width)/2,s);
}
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
//resolve(image);
this.paintedMsg = image;
stopRendering = false;
//context.drawImage(image, X, Y - image.height);
console.timeEnd('draw')
};
//return image;
}
*/
}
class Cat extends Animal {
constructor(raw = {}) {
super();
this.id = raw.id;
this.sprite = 0;
this.speed = raw.speed;
this.place = raw.place;
this.size = raw.size;
this.h = 140;
this.w = 220;
this.out = Math.floor(this.w / 2 * this.size);
this.wsc = Math.floor(this.w * this.size); | loadImage(`/img/players?r=${raw.skin}`, async a => {
this.skin = await this.combineSkin(a);
stopRendering = false;
});
game.cats.set(raw.id, this);
game.all.push(this);
}
delete() {
const i = game.all.findIndex(a => a == this);
if (i != -1) game.all.splice(i, 1);
game.cats.delete(this.id);
}
combineSkin(img, bits = 0) {
return new Promise(resolve => {
let image = document.createElement('img');
const raw = document.createElement('canvas').getContext('2d'),
result = document.createElement('canvas').getContext('2d'),
tmp = document.createElement('canvas').getContext('2d'),
end = () => {
image = document.createElement('img');
image.src = result.canvas.toDataURL('image/png');
image.onload = () => resolve(image);
};
result.canvas.width = 2640;
result.canvas.height = 280;
raw.canvas.width = 2640;
raw.canvas.height = 700;
raw.drawImage(img,0,0);
let body = raw.getImageData(0,0,2640,140);
result.putImageData(body,0,0);
for (let i = 0; i <= 2420; i += 220)
result.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
tmp.canvas.width = 2640;
tmp.canvas.height = 280;
tmp.putImageData(raw.getImageData(0, 140, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220) {
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,
(bits & 1 ? 280 : 140),220,140)),i,140);
}
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
if (bits & 2) {
tmp.clearRect(0,0,2640,280);
tmp.putImageData(raw.getImageData(0, 320, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220)
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
image = document.createElement('img');
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
end();
}
} else end();
}
});
}
walkAnimation() {
if (this.walkAnimationInterval) return;
this.sprite = 880;
this.walkAnimationInterval = setInterval(() => {
if (this.sprite >= 2420) this.sprite = 880
else this.sprite += 220;
}, 120);
}
stopWalkAnimation() {
this.sprite = 0;
clearInterval(this.walkAnimationInterval);
delete this.walkAnimationInterval;
}
walk(to) {
clearInterval(this.walkInterval);
this.walkAnimation();
const v = game.computeVector(this.place, to),
speed = this.speed / 1000 * 40,
t = v[2] / speed,
speedX = v[0] / t, speedY = v[1] / t;
let gone = 0;
if (v[0] < 0) this.dir = 0
else this.dir = 1;
this.walkInterval = setInterval(() => {
stopRendering = false;
gone += speed;
if (v[2] <= gone) {
this.stopWalk();
this.stopWalkAnimation();
return;
}
this.place[0] += speedX;
this.place[1] += speedY;
}, 40);
}
stopWalk() {
clearInterval(this.walkInterval)
}
}
class Location {
constructor(raw = {}) {
this.area = new Image();
loadImage(`/img/area?r=${raw.area}`, img => {
this.area = img;
this.drawArea();
});
this.fill(raw.fill);
}
fill(raw = []) {
raw.forEach(a => new Cat(a));
}
drawArea() {
const l = game.layer[2],
p = l.createPattern(this.area, 'repeat');
l.fillStyle = p;
l.fillRect(0, h * 0.55, w, h);
}
clear() {
game.cats.forEach(cat => {
cat.delete();
});
stopRendering = false;
}
}
async function get(url, options = {}) {
const res = await fetch(url, options);
if (options.text) return await res.text()
else return await res.json();
}
function send(code, msg) {
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ code, msg }));
return true;
}
}
function loadImage(path, f = a => a) {
const img = new Image();
img.src = path;
img.onload = () => f(img);
}
/*
game.computeVectorsStart = msg => {
if (msg.t - Date.now() <= 0) return msg.to;
const full = game.computeVector(msg.from, msg.to),
larger = Math.abs(Math.abs(full[0]) > Math.abs(full[1]) ? full[0] : full[1]),
stepX = full[0] / larger, stepY = full[1] / larger;
for (let i = 0, x = msg.to[0] - stepX, y = msg.to[1] - stepY; i <= larger; i++, x -= stepX, y-= stepY) {
if (game.computeVector([x, y], msg.to)[2] / 16 * 1000 > msg.t - Date.now()) return [x, y];
}
}
*/
game.openConnection();
game.initCanvas();
document.body.onclick = e => {
game.spaces.forEach(s => {
if (game.space(s, e.clientX, e.clientY)) s.react(e.clientX, e.clientY);
});
} | this.hsc = Math.floor(this.h * this.size);
this.customTextSize = 0;
this.dir = raw.dir;
//присвоивать дефолтную картинку, а потом подгружать новую
this.skin = new Image(); | random_line_split |
canvas_play.js | 'use strict'
let ws, x, y, w, h, ID, stopRendering = false;
class Time {
constructor() {
this.one = 1800;
}
updateTime(newTime) {
clearInterval(this.interval);
this.nw = newTime.slice(0, -1);
setTimeout(() =>
this.interval = setInterval(() => this.upOne(), this.one * 1000),
(Math.floor(this.nw[0] / this.one) + 1) * this.one - this.nw[0]);
}
upOne() {
if (this.nw[4] < 3) this.nw[4]++
else {
if (this.nw[3] < 29) this.nw[3]++
else {
if (this.nw[2] < 11) this.nw[2]++
else {
this.nw[1]++;
this.nw[2] = 0;
}
this.nw[3] = 0;
}
this.nw[4] = 0;
}
}
getSeason() {
switch (this.nw[2]) {
case 2: case 3: case 4: return 0;
case 5: case 6: case 7: return 1;
case 8: case 9: case 10: return 2;
case 11: case 0: case 1: return 3;
}
}
getMoonPhase() {
const phase = this.nw[3] / 29;
if (phase == 0) return 0;
if (phase < 0.14) return 1;
if (phase < 0.48) return 2;
if (phase < 0.52) return 3;
if (phase < 0.96) return 4;
return 5;
}
getDateAsString() {
let string = (() => {
let s = this.nw[2];
s = s <= 2 ? s :
s <= 5 ? s - 3 :
s <= 8 ? s - 6 : s - 9;
s /= 3;
if (s < 0.1) return 'Начало';
if (s < 0.5) return 'Первая половина';
if (s < 0.9) return 'Вторая половина';
return 'Конец'
})();
switch (this.getSeason()) {
case 0: string += ' сезона Голых Деревьев, '; break;
case 1: string += ' сезона Юных Листьев, '; break;
case 2: string += ' сезона Зелёных Деревьев, '; break;
case 3: string += ' листопада, '; break;
}
switch (this.getMoonPhase()) {
case 0: return string += 'новолуние';
case 1: return string += 'растущая луна';
case 2: return string += 'первая половина луны';
case 3: return string += 'половина луны';
case 4: return string += 'вторая половина луны';
case 5: return string += 'полнолуние';
}
}
}
class AdditionToCanvas {
rotateHorizontally(img) {
const d = img.data;
for (let i = 0; i < img.height; i++) {
for (let j = 0; j < img.width / 2; j++) {
const p0 = i * img.width * 4 + j * 4,
p1 = i * img.width * 4 + (img.width - j) * 4;
for (let k = 0; k < 4; k++) {
const t = d[p0 + k];
d[p0 + k] = d[p1 + k];
d[p1 + k] = t;
}
}
}
return img;
}
}
class Game {
constructor() {
//super();
this.host = location.origin.replace(/^http/, 'ws');
this.time = new Time();
this.layer = [
document.getElementById('zero-layer').getContext('2d'),
document.getElementById('first-layer').getContext('2d'),
document.getElementById('under-layer').getContext('2d'),
document.getElementById('tmp').getContext('2d')
]
this.canvas = new AdditionToCanvas();
this.scale = 1;
this.nowl = 0;
this.area = {
type: 0,
react: (X, Y) => {
send(102, [X / x, (Y - h * 0.55) / y]);
}
}
this.colorOfInterface = {
main: '#9b6c40',
text: '#000000'
}
this.all = [];
this.cats = new Map();
this.spaces = new Set([this.area]);
}
cookSpace(data, f = a => a) {
data.react = f;
this.spaces.add(data);
return data;
}
deleteSpace(link) {
return this.spaces.delete(link);
}
computeVector(A, B) {
return [
B[0] - A[0],
B[1] - A[1],
Math.round(Math.sqrt(Math.pow(B[0] - A[0], 2) + Math.pow(B[1] - A[1], 2)))
];
}
serveText(text, maxTextLength = 1) {
maxTextLength = Math.floor(maxTextLength * 40);
const result = [''], max = Math.floor(maxTextLength / 2);
text = text.match(/\s*[\S]+\s*/g);
for (let i = 0; i < text.length; i++) {
const s = text[i];
if (s.length > max) {
text.splice(i + 1, 0, s.slice(0, max), s.slice(max + 1));
continue;
}
if (result[result.length - 1].length + s.length > maxTextLength) {
result.push(s);
} else result[result.length - 1] += s;
}
return result;
}
initCanvas() {
w = document.documentElement.clientWidth * this.scale;
h = document.documentElement.clientHeight * this.scale;
let zero = this.layer[0],
first = this.layer[1],
under = this.layer[2],
totalHight = h * 0.85; //0.3 + 0.55
zero.canvas.width = w;
zero.canvas.height = totalHight;
first.canvas.width = w;
first.canvas.height = totalHight;
under.canvas.width = w;
under.canvas.height = totalHight;
this.area.range = [0,h*0.55,w,totalHight]
x = w / 160;
y = h * 0.3 / 27;
}
async render() {
console.time('render')
if (stopRendering) return requestAnimationFrame(game.render);
//при изменениее game.scale:
//1) перерисовать under-layer
//2) пересчитать параметры
const l = game.layer[game.nowl],
areaHight = h * 0.55;
game.all.sort((a,b) => a.place[1] - b.place[1]);
game.all.forEach(i => {
const placeX = Math.floor(i.place[0] * x),
placeY = Math.floor(areaHight + i.place[1] * y);
l.drawImage(i.skin, i.sprite, i.dir ? 0 : i.h,
i.w, i.h, placeX - i.out,
placeY - i.hsc,
i.wsc, i.hsc);
// if (i.paintedMsg)
// l.drawImage(i.paintedMsg, Math.floor(i.place[0] * x - i.paintedMsg.width/2/*- i.out*/),
// Math.floor(areaHight + i.place[1] * y - i.hsc - i.paintedMsg.height));
if (i.msg) {
for (let j = 0, p = 0; j < i.msg.length; j++) {
console.log(i.msg[j])
l.drawImage(i.msg[j].image, placeX, placeY - i.hsc - p);
p += i.msg[j].height;
}
}
});
game.nowl ^= 1;
game.layer[game.nowl].clearRect(0, 0, w, h);
requestAnimationFrame(game.render);
stopRendering = true;
console.timeEnd('render')
}
space(s = {}, X, Y) {
switch (s.type) {
case 0:
s = s.range;
if (X >= s[0] && X <= s[2] &&
Y >= s[1] && Y <= s[3]) return true;
break;
}
}
openConnection() {
//if a connection already exists, try to close the connection
if (ws) ws.close();
ws = new WebSocket(this.host, 'play');
ws.onopen = () => {
get('/getCookie').then(res => {
if (res.code == 1) send(100, res.headers)
else console.log('Ошибка авторизации');
});
}
ws.onmessage = e => {
const {code, msg} = JSON.parse(e.data);
console.log({code,msg});
switch (code) {
case 100:
game.time.updateTime(msg.time);
game.location = new Location(msg.loc);
ID = msg.id;
requestAnimationFrame(game.render);
break;
case 101:
game.cats.get(msg.id).addMsg(msg.text);
break;
case 104:
game.cats.get(msg.id).walk(msg.msg);
break;
case 105:
game.location.clear();
game.location.fill(msg.fill);
break;
case 107:
if (game.cats.get(msg.id)) return;
new Cat(msg);
break;
case 108:
game.time.updateTime(msg);
break;
}
}
}
}
var game = new Game();
class Animal {
constructor(raw = {}) {
this.msg = [];
}
roundedRect(c, x, y, width, height, radius, t) {
c.beginPath();
c.moveTo(x,y+radius);
c.lineTo(x,y+height-radius);
c.quadraticCurveTo(x,y+height,x+radius,y+height);
if (t) {
c.lineTo(x+width/2-4,y+height);
c.lineTo(x+width/2,y+height+7);
c.lineTo(x+width/2+4,y+height);
}
c.lineTo(x+width-radius,y+height);
c.quadraticCurveTo(x+width,y+height,x+width,y+height-radius);
c.lineTo(x+width,y+radius);
c.quadraticCurveTo(x+width,y,x+width-radius,y);
c.lineTo(x+radius,y);
c.quadraticCurveTo(x,y,x,y+radius);
c.stroke();
}
deleteMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
if (i == 9) {
this.msg.pop();
link.killed = true;
} else link.sprite[0] = link.width * i;
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.deleteMsg(this.msg[this.msg.length - 1]);
const m = {}
m.text = game.serveText(msg, this.size + this.customTextSize);
m.sprite = [];
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.deleteMsg(m);
}, 10000);
}
renderMsg(m) {
/*
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = docume | k.alpha = 1.0 - i * i / 81;
this.shiftAlphaMsg(link);
}
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.extMsg(this.msg[this.msg.length-1]);
const m = {}
//m.alpha = 1.0;
m.text = game.serveText(msg);
this.drawMsg(m, 1);
if (this.msg[0]) this.drawMsg(this.msg[0]);
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.extMsg(m);
}, 10000);
//this.drawMsg();
} */ /*
shiftAlphaMsg(m) {
const tmp = game.layer[3];
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.globalAlpha = m.alpha;
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
m.image = image;
stopRendering = false;
}
} */
/*
drawMsg(m, newMsg) {
console.log(m)
if (newMsg) m.k = 0.8 + 0.2 * this.size + this.customTextSize;
const tmp = game.layer[3],
interval = 12 * m.k;
if (newMsg) {
m.rectHeight = m.text.length * interval + 3,
m.rectWidth = m.text[0].length * 9 * m.k;
}
m.height = m.rectHeight + (newMsg ? 11 : 4);
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,m.rectWidth,m.rectHeight,5,(newMsg ? true : false));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
/*
drawMsg() {
if (this.msg.length == 0) {
delete this.paintedMsg;
stopRendering = false;
return;
}
console.time('draw')
const k = 0.8 + 0.2 * this.size + this.customTextSize,
tmp = game.layer[3];
let interval = 12 * k,
s = 0,
largestWidth = 14;
this.msg.forEach((a, i) => {
const rectHeight = a.text.length * interval + 3,
rectWidth = a.text[0].length * 9 * k;
a.height = rectHeight + 4 + (i ? 0 : 7);
s += a.height;
tmp.canvas.height = a.height;
tmp.canvas.width = 4 + rectWidth;
if (tmp.canvas.width > largestWidth) largestWidth = tmp.canvas.width;
tmp.globalAlpha = a.alpha;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,rectWidth,rectHeight,5,(i ? undefined : true));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
a.text.forEach((b, j) => tmp.fillText(b,5,13*k+interval*j));
a.image = tmp.getImageData(0,0,4 + rectWidth,a.height);
});
tmp.canvas.height = s;
tmp.canvas.width = largestWidth;
for (let i = 0; i < this.msg.length; i++) {
s -= this.msg[i].height;
tmp.putImageData(this.msg[i].image,(largestWidth-this.msg[i].image.width)/2,s);
}
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
//resolve(image);
this.paintedMsg = image;
stopRendering = false;
//context.drawImage(image, X, Y - image.height);
console.timeEnd('draw')
};
//return image;
}
*/
}
class Cat extends Animal {
constructor(raw = {}) {
super();
this.id = raw.id;
this.sprite = 0;
this.speed = raw.speed;
this.place = raw.place;
this.size = raw.size;
this.h = 140;
this.w = 220;
this.out = Math.floor(this.w / 2 * this.size);
this.wsc = Math.floor(this.w * this.size);
this.hsc = Math.floor(this.h * this.size);
this.customTextSize = 0;
this.dir = raw.dir;
//присвоивать дефолтную картинку, а потом подгружать новую
this.skin = new Image();
loadImage(`/img/players?r=${raw.skin}`, async a => {
this.skin = await this.combineSkin(a);
stopRendering = false;
});
game.cats.set(raw.id, this);
game.all.push(this);
}
delete() {
const i = game.all.findIndex(a => a == this);
if (i != -1) game.all.splice(i, 1);
game.cats.delete(this.id);
}
combineSkin(img, bits = 0) {
return new Promise(resolve => {
let image = document.createElement('img');
const raw = document.createElement('canvas').getContext('2d'),
result = document.createElement('canvas').getContext('2d'),
tmp = document.createElement('canvas').getContext('2d'),
end = () => {
image = document.createElement('img');
image.src = result.canvas.toDataURL('image/png');
image.onload = () => resolve(image);
};
result.canvas.width = 2640;
result.canvas.height = 280;
raw.canvas.width = 2640;
raw.canvas.height = 700;
raw.drawImage(img,0,0);
let body = raw.getImageData(0,0,2640,140);
result.putImageData(body,0,0);
for (let i = 0; i <= 2420; i += 220)
result.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
tmp.canvas.width = 2640;
tmp.canvas.height = 280;
tmp.putImageData(raw.getImageData(0, 140, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220) {
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,
(bits & 1 ? 280 : 140),220,140)),i,140);
}
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
if (bits & 2) {
tmp.clearRect(0,0,2640,280);
tmp.putImageData(raw.getImageData(0, 320, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220)
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
image = document.createElement('img');
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
end();
}
} else end();
}
});
}
walkAnimation() {
if (this.walkAnimationInterval) return;
this.sprite = 880;
this.walkAnimationInterval = setInterval(() => {
if (this.sprite >= 2420) this.sprite = 880
else this.sprite += 220;
}, 120);
}
stopWalkAnimation() {
this.sprite = 0;
clearInterval(this.walkAnimationInterval);
delete this.walkAnimationInterval;
}
walk(to) {
clearInterval(this.walkInterval);
this.walkAnimation();
const v = game.computeVector(this.place, to),
speed = this.speed / 1000 * 40,
t = v[2] / speed,
speedX = v[0] / t, speedY = v[1] / t;
let gone = 0;
if (v[0] < 0) this.dir = 0
else this.dir = 1;
this.walkInterval = setInterval(() => {
stopRendering = false;
gone += speed;
if (v[2] <= gone) {
this.stopWalk();
this.stopWalkAnimation();
return;
}
this.place[0] += speedX;
this.place[1] += speedY;
}, 40);
}
stopWalk() {
clearInterval(this.walkInterval)
}
}
class Location {
constructor(raw = {}) {
this.area = new Image();
loadImage(`/img/area?r=${raw.area}`, img => {
this.area = img;
this.drawArea();
});
this.fill(raw.fill);
}
fill(raw = []) {
raw.forEach(a => new Cat(a));
}
drawArea() {
const l = game.layer[2],
p = l.createPattern(this.area, 'repeat');
l.fillStyle = p;
l.fillRect(0, h * 0.55, w, h);
}
clear() {
game.cats.forEach(cat => {
cat.delete();
});
stopRendering = false;
}
}
async function get(url, options = {}) {
const res = await fetch(url, options);
if (options.text) return await res.text()
else return await res.json();
}
function send(code, msg) {
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ code, msg }));
return true;
}
}
function loadImage(path, f = a => a) {
const img = new Image();
img.src = path;
img.onload = () => f(img);
}
/*
game.computeVectorsStart = msg => {
if (msg.t - Date.now() <= 0) return msg.to;
const full = game.computeVector(msg.from, msg.to),
larger = Math.abs(Math.abs(full[0]) > Math.abs(full[1]) ? full[0] : full[1]),
stepX = full[0] / larger, stepY = full[1] / larger;
for (let i = 0, x = msg.to[0] - stepX, y = msg.to[1] - stepY; i <= larger; i++, x -= stepX, y-= stepY) {
if (game.computeVector([x, y], msg.to)[2] / 16 * 1000 > msg.t - Date.now()) return [x, y];
}
}
*/
game.openConnection();
game.initCanvas();
document.body.onclick = e => {
game.spaces.forEach(s => {
if (game.space(s, e.clientX, e.clientY)) s.react(e.clientX, e.clientY);
});
}
| nt.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
console.time('draw')
const tmp = game.layer[3],
k = 0.8 + 0.2 * this.size + this.customTextSize,
interval = 12 * k,
rectHeight = m.text.length * interval + 4 * k,
rectWidth = m.text[0].length * 9 * k,
height = rectHeight + 4,
width = rectWidth + 4,
canvasHeight = height * 2 + 7,
last = width * 6;
console.log(m.text)
tmp.canvas.height = canvasHeight;
tmp.canvas.width = last;
for (let i = 0; i < 2; i++) {
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp, 2, (i ? 2 : 9 + height), rectWidth, rectHeight, 5, i);
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b, 5, 13 * k + interval * j + (i ? 0 : 7 + height), rectWidth));
}
for (let i = width, alpha = 0.7; i < last; i += width, alpha *= 0.6) {
const result = tmp.getImageData(0, 0, width, canvasHeight),
r = result.data;
for (let j = 3; j < r.length; j += 4) {
r[j] *= alpha;
}
tmp.putImageData(result, i, 0);
}
console.timeEnd('draw')
return tmp.canvas;
}
/*
extMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
console.log(link.alpha + ': ' + link.text[0]);
if (i == 9) {
this.msg.pop();
link.killed = true;
} else {
lin | identifier_body |
canvas_play.js | 'use strict'
let ws, x, y, w, h, ID, stopRendering = false;
class Time {
constructor() {
this.one = 1800;
}
updateTime(newTime) {
clearInterval(this.interval);
this.nw = newTime.slice(0, -1);
setTimeout(() =>
this.interval = setInterval(() => this.upOne(), this.one * 1000),
(Math.floor(this.nw[0] / this.one) + 1) * this.one - this.nw[0]);
}
upOne() {
if (this.nw[4] < 3) this.nw[4]++
else {
if (this.nw[3] < 29) this.nw[3]++
else {
if (this.nw[2] < 11) this.nw[2]++
else {
this.nw[1]++;
this.nw[2] = 0;
}
this.nw[3] = 0;
}
this.nw[4] = 0;
}
}
getSeason() {
switch (this.nw[2]) {
case 2: case 3: case 4: return 0;
case 5: case 6: case 7: return 1;
case 8: case 9: case 10: return 2;
case 11: case 0: case 1: return 3;
}
}
getMoonPhase() {
const phase = this.nw[3] / 29;
if (phase == 0) return 0;
if (phase < 0.14) return 1;
if (phase < 0.48) return 2;
if (phase < 0.52) return 3;
if (phase < 0.96) return 4;
return 5;
}
getDateAsString() {
let string = (() => {
let s = this.nw[2];
s = s <= 2 ? s :
s <= 5 ? s - 3 :
s <= 8 ? s - 6 : s - 9;
s /= 3;
if (s < 0.1) return 'Начало';
if (s < 0.5) return 'Первая половина';
if (s < 0.9) return 'Вторая половина';
return 'Конец'
})();
switch (this.getSeason()) {
case 0: string += ' сезона Голых Деревьев, '; break;
case 1: string += ' сезона Юных Листьев, '; break;
case 2: string += ' сезона Зелёных Деревьев, '; break;
case 3: string += ' листопада, '; break;
}
switch (this.getMoonPhase()) {
case 0: return string += 'новолуние';
case 1: return string += 'растущая луна';
case 2: return string += 'первая половина луны';
case 3: return string += 'половина луны';
case 4: return string += 'вторая половина луны';
case 5: return string += 'полнолуние';
}
}
}
class AdditionToCanvas {
rotateHorizontally(img) {
const d = img.data;
for (let i = 0; i < img.height; i++) {
for (let j = 0; j < img.width / 2; j++) {
const p0 = i * img.width * 4 + j * 4,
p1 = i * img.width * 4 + (img.width - j) * 4;
for (let k = 0; k < 4; k++) {
const t = d[p0 + k];
d[p0 + k] = d[p1 + k];
d[p1 + k] = t;
}
}
}
return img;
}
}
class Game {
constructor() {
//super();
this.host = location.origin.replace(/^http/, 'ws');
this.time = new Time();
this.layer = [
document.getElementById('zero-layer').getContext('2d'),
document.getElementById('first-layer').getContext('2d'),
document.getElementById('under-layer').getContext('2d'),
document.getElementById('tmp').getContext('2d')
]
this.canvas = new AdditionToCanvas();
this.scale = 1;
this.nowl = 0;
this.area = {
type: 0,
react: (X, Y) => {
send(102, [X / x, (Y - h * 0.55) / y]);
}
}
this.colorOfInterface = {
main: '#9b6c40',
text: '#000000'
}
this.all = [];
this.cats = new Map();
this.spaces = new Set([this.area]);
}
cookSpace(data, f = a => a) {
data.react = f;
this.spaces.add(data);
return data;
}
deleteSpace(link) {
return this.spaces.delete(link);
}
computeVector(A, B) {
return [
B[0] - A[0],
B[1] - A[1],
Math.round(Math.sqrt(Math.pow(B[0] - A[0], 2) + Math.pow(B[1] - A[1], 2)))
];
}
serveText(text, maxTextLength = 1) {
maxTextLength = Math.floor(maxTextLength * 40);
const result = [''], max = Math.floor(maxTextLength / 2);
text = text.match(/\s*[\S]+\s*/g);
for (let i = 0; i < text.length; i++) {
const s = text[i];
if (s.length > max) {
text.splice(i + 1, 0, s.slice(0, max), s.slice(max + 1));
continue;
}
if (result[result.length - 1].length + s.length > maxTextLength) {
result.push(s);
} else result[result.length - 1] += s;
}
return result;
}
initCanvas() {
w = document.documentElement.clientWidth * this.scale;
h = document.documentElement.clientHeight * this.scale;
let zero = this.layer[0],
first = this.layer[1],
under = this.layer[2],
totalHight = h * 0.85; //0.3 + 0.55
zero.canvas.width = w;
zero.canvas.height = totalHight;
first.canvas.width = w;
first.canvas.height = totalHight;
under.canvas.width = w;
under.canvas.height = totalHight;
this.area.range = [0,h*0.55,w,totalHight]
x = w / 160;
y = h * 0.3 / 27;
}
async render() {
console.time('render')
if (stopRendering) return requestAnimationFrame(game.render);
//при изменениее game.scale:
//1) перерисовать under-layer
//2) пересчитать параметры
const l = game.layer[game.nowl],
areaHight = h * 0.55;
game.all.sort((a,b) => a.place[1] - b.place[1]);
game.all.forEach(i => {
const placeX = Math.floor(i.place[0] * x),
placeY = Math.floor(areaHight + i.place[1] * y);
l.drawImage(i.skin, i.sprite, i.dir ? 0 : i.h,
i.w, i.h, placeX - i.out,
placeY - i.hsc,
i.wsc, i.hsc);
// if (i.paintedMsg)
// l.drawImage(i.paintedMsg, Math.floor(i.place[0] * x - i.paintedMsg.width/2/*- i.out*/),
// Math.floor(areaHight + i.place[1] * y - i.hsc - i.paintedMsg.height));
if (i.msg) {
for (let j = 0, p = 0; j < i.msg.length; j++) {
console.log(i.msg[j])
l.drawImage(i.msg[j].image, placeX, placeY - i.hsc - p);
p += i.msg[j].height;
}
}
});
game.nowl ^= 1;
game.layer[game.nowl].clearRect(0, 0, w, h);
requestAnimationFrame(game.render);
stopRendering = true;
console.timeEnd('render')
}
space(s = {}, X, Y) {
switch (s.type) {
case 0:
s = s.range;
if (X >= s[0] && X <= s[2] &&
Y >= s[1] && Y <= s[3]) return true;
break;
}
}
openConnection() {
//if a connection already exists, try to close the connection
if (ws) ws.close();
ws = new WebSocket(this.host, 'play');
ws.onopen = () => {
get('/getCookie').then(res => {
if (res.code == 1) send(100, res.headers)
else console.log('Ошибка авторизации');
});
}
ws.onmessage = e => {
const {code, msg} = JSON.parse(e.data);
console.log({code,msg});
switch (code) {
case 100:
game.time.updateTime(msg.time);
game.location = new Location(msg.loc);
ID = msg.id;
requestAnimationFrame(game.render);
break;
case 101:
game.cats.get(msg.id).addMsg(msg.text);
break;
case 104:
game.cats.get(msg.id).walk(msg.msg);
break;
case 105:
game.location.clear();
game.location.fill(msg.fill);
break;
case 107:
if (game.cats.get(msg.id)) return;
new Cat(msg);
break;
case 108:
game.time.updateTime(msg);
break;
}
}
}
}
var game = new Game();
class Animal {
constructor(raw = {}) {
this.msg = [];
}
roundedRect(c, x, y, width, height, radius, t) {
c.beginPath();
c.moveTo(x,y+radius);
c.lineTo(x,y+height-radius);
c.quadraticCurveTo(x,y+height,x+radius,y+height);
if (t) {
c.lineTo(x+width/2-4,y+height);
c.lineTo(x+width/2,y+height+7);
c.lineTo(x+width/2+4,y+height);
}
c.lineTo(x+width-radius,y+height);
c.quadraticCurveTo(x+width,y+height,x+width,y+height-radius);
c.lineTo(x+width,y+radius);
c.quadraticCurveTo(x+width,y,x+width-radius,y);
c.lineTo(x+radius,y);
c.quadraticCurveTo(x,y,x,y+radius);
c.stroke();
}
deleteMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
if (i == 9) {
this.msg.pop();
link.killed = true;
} else link.sprite[0] = link.width * i;
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.deleteMsg(this.msg[this.msg.length - 1]);
const m = {}
m.text = game.serveText(msg, this.size + this.customTextSize);
m.sprite = [];
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.deleteMsg(m);
}, 10000);
}
renderMsg(m) {
/*
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
console.time('draw')
const tmp = game.layer[3],
k = 0.8 + 0.2 * this.size + this.customTextSize,
interval = 12 * k,
rectHeight = m.text.length * interval + 4 * k,
rectWidth = m.text[0].length * 9 * k,
height = rectHeight + 4,
width = rectWidth + 4,
canvasHeight = height * 2 + 7,
last = width * 6;
console.log(m.text)
tmp.canvas.height = canvasHeight;
tmp.canvas.width = last;
for (let i = 0; i < 2; i++) {
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp, 2, (i ? 2 : 9 + height), rectWidth, rectHeight, 5, i);
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b, 5, 13 * k + interval * j + (i ? 0 : 7 + height), rectWidth));
}
for (let i = width, alpha = 0.7; i < last; i += width, alpha *= 0.6) {
const result = tmp.getImageData(0, 0, width, canvasHeight),
r = result.data;
for (let j = 3; j < r.length; j += 4) {
r[j] *= alpha;
}
tmp.putImageData(result, i, 0);
}
console.timeEnd('draw')
return tmp.canvas;
}
/*
extMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
console.log(link.alpha + ': ' + link.text[0]);
if (i == 9) {
this.msg.pop();
link.killed = true;
} else {
link.alpha = 1.0 - i * i / 81;
this.shiftAlphaMsg(link);
}
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.extMsg(this.msg[this.msg.length-1]);
const m = {}
//m.alpha = 1.0;
m.text = game.serveText(msg);
this.drawMsg(m, 1);
if (this.msg[0]) this.drawMsg(this.msg[0]);
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.extMsg(m);
}, 10000);
//this.drawMsg();
} */ /*
shiftAlphaMsg(m) {
const tmp = game.layer[3];
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.globalAlpha = m.alpha;
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
m.image = image;
stopRendering = false;
}
} */
/*
drawMsg(m, newMsg) {
console.log(m)
if (newMsg) m.k = 0.8 + 0.2 * this.size + this.customTextSize;
const tmp = game.layer[3],
interval = 12 * m.k;
if (newMsg) {
m.rectHeight = m.text.length * interval + 3,
m.rectWidth = m.text[0].length * 9 * m.k;
}
m.height = m.rectHeight + (newMsg ? 11 : 4);
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,m.rectWidth,m.rectHeight,5,(newMsg ? true : false));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
/*
drawMsg() {
if (this.msg.length == 0) {
delete this.paintedMsg;
stopRendering = false;
return;
}
console.time('draw')
const k = 0.8 + 0.2 * this.size + this.customTextSize,
tmp = game.layer[3];
let interval = 12 * k,
s = 0,
largestWidth = 14;
this.msg.forEach((a, i) => {
const rectHeight = a.text.length * interval + 3,
rectWidth = a.text[0].length * 9 * k;
a.height = rectHeight + 4 + (i ? 0 : 7);
s += a.height;
tmp.canvas.height = a.height;
tmp.canvas.width = 4 + rectWidth;
if (tmp.canvas.width > largestWidth) largestWidth = tmp.canvas.width;
tmp.globalAlpha = a.alpha;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,rectWidth,rectHeight,5,(i ? undefined : true));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
a.text.forEach((b, j) => tmp.fillText(b,5,13*k+interval*j));
a.image = tmp.getImageData(0,0,4 + rectWidth,a.height);
});
tmp.canvas.height = s;
tmp.canvas.width = largestWidth;
for (let i = 0; i < this.msg.length; i++) {
s -= this.msg[i].height;
tmp.putImageData(this.msg[i].image,(largestWidth-this.msg[i].image.width)/2,s);
}
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
//resolve(image);
this.paintedMsg = image;
stopRendering = false;
//context.drawImage(image, X, Y - image.height);
console.timeEnd('draw')
};
//return image;
}
*/
}
class Cat extends Animal {
constructor(raw = {}) {
super();
this.id = raw.id;
this.sprite = 0;
this.speed = raw.speed;
this.place = raw.place;
this.size = raw.size;
this.h = 140;
this.w = 220;
this.out = Math.floor(this.w / 2 * this.size);
this.wsc = Math.floor(this.w * this.size);
this.hsc = Math.floor(this.h * this.size);
this.customTextSize = 0;
this.dir = raw.dir;
//присвоивать дефолтную картинку, а потом подгружать новую
this.skin = new Image();
loadImage(`/img/players?r=${raw.skin}`, async a => {
this.skin = await this.combineSkin(a);
stopRendering = false;
});
game.cats.set(raw.id, this);
game.all.push(this);
}
delete() {
const i = game.all.findIndex(a => a == this);
if (i != -1) game.all.splice(i, 1);
game.cats.delete(this.id);
}
combineSkin(img, bits = 0) {
return new Promise(resolve => {
let image = document.createElement('img');
const raw = document.createElement('canvas').getContext('2d'),
result = document.createElement('canvas').getContext('2d'),
tmp = document.createElement('canvas').getContext('2d'),
end = () => {
image = document.createElement('img');
image.src = result.canvas.toDataURL('image/png');
image.onload = () => resolve(image);
};
result.canvas.width = 2640;
result.canvas.height = 280;
raw.canvas.width = 2640;
raw.canvas.height = 700;
raw.drawImage(img,0,0);
let body = raw.getImageData(0,0,2640,140);
result.putImageData(body,0,0);
for (let i = 0; i <= 2420; i += 220)
result.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
tmp.canvas.width = 2640;
tmp.canvas.height = 280;
tmp.putImageData(raw.getImageData(0, 140, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220) {
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,
(bits & 1 ? 280 : 140),220,140)),i,140);
}
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
if (bits & 2) {
tmp.clearRect(0,0,2640,280);
tmp.putImageData(raw.getImageData(0, 320, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220)
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
image = document.createElement('img');
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
end();
}
} else end();
}
});
}
walkAnimation() {
if (this.walkAnimationInterval) return;
this.sprite = 880;
this.walkAnimationInterval = setInterval(() => {
if (this.sprite >= 2420) this.sprite = 880
else this.sprite += 220;
}, 120);
}
stopWalkAnimation() {
this.sprite = 0;
clearInterval(this.walkAnimationInterval);
delete this.walkAnimationInterval;
}
walk(to) {
clearInterval(this.walkInterval);
this.walkAnimation();
const v = game.computeVector(this.place, to),
speed = this.speed / 1000 * 40,
t = v[2] / speed,
speedX = v[0] / t, speedY = v[1] / t;
let gone = 0;
if (v[0] < 0) this.dir = 0
else this.dir = 1;
this.walkInterval = setInterval(() => {
stopRendering = false;
gone += speed;
if (v[2] <= gone) {
this.stopWalk();
this.stopWalkAnimation();
return;
}
this.place[0] += speedX;
this.place[1] += speedY;
}, 40);
}
stopWalk() {
clearInterval(this.walkInterval)
}
}
class Location {
constructor(raw = {}) {
this.area = new Image();
loadImage(`/img/area?r=${raw.area}`, img => {
this.area = img;
this.drawArea();
});
this.fill(raw.fill);
}
fill(raw = []) {
raw.forEach(a => new Cat(a));
}
drawArea() {
const l = game.layer[2],
p = l.createPattern(this.area, 'repeat');
l.fillStyle = p;
l.fillRect(0, h * 0.55, w, h);
}
clear() {
game.cats.forEach(cat => {
cat.delete();
});
stopRendering = false;
}
}
async function get(url, options = {}) {
const res = await fetch(url, options);
if (options.text) return await res.text()
else return await res.json();
}
function send(code, msg) {
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ code, msg }));
return true;
}
}
function loadImage(path, f = a => a) {
const img = new Image();
img.src = path;
img.onload = () => f(img);
}
/*
game.computeVectorsStart = msg => {
if (msg.t - Date.now() <= 0) return msg.to;
const full = game.computeVector(msg.from, msg.to),
larger = Math.abs(Math.abs(full[0]) > Math | [1]) ? full[0] : full[1]),
stepX = full[0] / larger, stepY = full[1] / larger;
for (let i = 0, x = msg.to[0] - stepX, y = msg.to[1] - stepY; i <= larger; i++, x -= stepX, y-= stepY) {
if (game.computeVector([x, y], msg.to)[2] / 16 * 1000 > msg.t - Date.now()) return [x, y];
}
}
*/
game.openConnection();
game.initCanvas();
document.body.onclick = e => {
game.spaces.forEach(s => {
if (game.space(s, e.clientX, e.clientY)) s.react(e.clientX, e.clientY);
});
}
| .abs(full | identifier_name |
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespaces is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNode exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, syntax tree can be build even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum | {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, and atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() {
let sexps = "
92
(+ 62 30)
(/ 92 0)
nan
(+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
}
| SexpRes | identifier_name |
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespaces is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNode exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, syntax tree can be build even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, and atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() { | (+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
} | let sexps = "
92
(+ 62 30)
(/ 92 0)
nan | random_line_split |
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespaces is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNode exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, syntax tree can be build even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, and atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() |
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() {
let sexps = "
92
(+ 62 30)
(/ 92 0)
nan
(+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
}
| {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
} | identifier_body |
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespaces is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNode exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, syntax tree can be build even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, and atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else |
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() {
let sexps = "
92
(+ 62 30)
(/ 92 0)
nan
(+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
}
| {
None
} | conditional_block |
play.js | import api from '../../api/api.js'
let app=getApp()
let backgroundAudioManager = wx.getBackgroundAudioManager()
console.log(backgroundAudioManager.src);
if (backgroundAudioManager.src) {
}
backgroundAudioManager.title = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].title:"Sour Candy"
backgroundAudioManager.singer = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].singer:"Lady Gaga/BLACKPINK - Chromatica"
backgroundAudioManager.coverImgUrl = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].picUrl:"https://p2.music.126.net/hPCvLRx5TxSWul9YY5n6sA==/109951165023441548.jpg"
backgroundAudioManager.src = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].src:"http://m10.music.126.net/20200609181058/081e18703250af15bcb5ae133ce47ee9/ymusic/obj/w5zDlMODwrDDiGjCn8Ky/2687158303/2c0a/fc3f/31bc/9008459c57d35bcd1c0f425b6504a1c1.mp3"
app.globalData.backgroundAudioManager=backgroundAudioManager
app.globalData.backgroundAudioManager.pause()
Page({
/**
* 页面的初始数据
*/
data: {
//当前歌曲信息
songInfo:{},
//播放列表
musicList:[],
//正在播放
isPlay:false,
playType:'list_play',
playTypeList:[
{hanlderName:'list_play',src:'../../assets/images/list_play.png',className:'button-bar-list_play',hidden:false},
{hanlderName:'cycle_list_play',src:'../../assets/images/cycle_list_play.png',className:'button-bar-cycle_list_play',hidden:true},
{hanlderName:'cycle_single_play',src:'../../assets/images/cycle_single_play.png',className:'button-bar-cycle_single_play',hidden:true},
{hanlderName:'random_play',src:'../../assets/images/random_play.png',className:'button-bar-random_play',hidden:true},
],
//节流函数的开始
timeStart:0,
//进度条的位置(红柱的长度)
progressBarWidth:0,
//进度条的节点信息
progressBarPosition:[],
nowClientX:0,
newClientX:0
},
/**
* 生命周期函数--监听页面加载
*/
onLoad: function (options) {
this.setData({
songInfo:app.globalData.songInfo,
musicList:app.globalData.musicList,
})
app.globalData.backgroundAudioManager.pause()
this.changePlayType('list_play')
},
/**
* 生命周期函数--监听页面初次渲染完成
*/
onReady: function () {
let that=this
const query = wx.createSelectorQuery()
query.select('#progressBar').boundingClientRect()
query.selectViewport().scrollOffset()
query.exec(function(res){
that.setData({
progressBarPosition:res
})
})
},
/**
* 生命周期函数--监听页面显示
*/
onShow: function () {
if (app.globalData.id) {
let ids=this.data.musicList.map(v=>v.id)
//有ID,并且是新的ID
if (ids.indexOf(app.globalData.id)==-1) {
this.createPlayerFn(app.globalData.id)
}
//是已经有的ID,
if (ids.indexOf(app.globalData.id)!=-1) {
// 并且不是当前歌曲,做切歌操作
if (app.globalData.id!=this.data.songInfo.id) {
this.createBackgroundAudioManager(this.data.musicList[ids.indexOf(app.globalData.id)])
}else{ //就是当前歌曲,不做操作
return
}
}
}else{//刚进入没选歌,从下方Bar点进来
if (app.globalData.backgroundAudioManager.src) {
return
}
if (this.data.musicList.length>0) {
//创建播放器
this.createBackgroundAudioManager(this.data.musicList[0],false)
}else{
wx.showToast({
title: '播放列表为空,请添加歌曲',
icon: 'none',
image: '',
duration: 1500,
mask: false,
success: (result)=>{
// wx.switchTab({
// url: "../discover/discover",
// })
},
fail: ()=>{},
complete: ()=>{}
});
}
}
},
/**
* 生命周期函数--监听页面隐藏
*/
onHide: function () {
},
/**
* 生命周期函数--监听页面卸载
*/
onUnload: function () {
},
/**
* 页面相关事件处理函数--监听用户下拉动作
*/
onPullDownRefresh: function () {
},
/**
* 页面上拉触底事件的处理函数
*/
onReachBottom: function () {
},
/**
* 用户点击右上角分享
*/
onShareAppMessage: function () {
},
//请求播放地址和歌曲详情
createPlayerFn:async function (id) {
let resPlayURL=await api.getPlayURL({id:id}),//先拿到播放地址
resSongInfo=await api.getPlayDtail({ids:id}) //再拿歌曲详情(拿封面)
if (resPlayURL.code==200 && resSongInfo.code==200) {
//当前歌曲播放信息
let singer=''
//歌手可能多个
resSongInfo.songs[0].ar.map((item,index)=>{
singer+=index==resSongInfo.songs[0].ar.length-1?item.name:item.name+'/'
})
let songInfo={
id:id,
title:resSongInfo.songs[0].name,
singer:singer+' - '+resSongInfo.songs[0].al.name,//歌手/歌手/歌手-专辑
src:resPlayURL.data[0].url,
picUrl:resSongInfo.songs[0].al.picUrl,
songTime_ms:resSongInfo.songs[0].dt,//单位毫秒
songTime_s:resSongInfo.songs[0].dt/1000,//单位毫秒
}
//创建播放器
this.createBackgroundAudioManager(songInfo)
app.globalData.musicList.push(songInfo)
this.setData({
songInfo:songInfo,
musicList:app.globalData.musicList,
isPlay:true
})
wx.setStorageSync("musicList",this.data.musicList)
}else{
}
},
//控制播放
playControlFn:function (event) {
let hanlderName=event.currentTarget.dataset.hanldername
//----------------------第一个图标 调整播放模式--------------------
if (hanlderName=='list_play' || hanlderName=='cycle_list_play' || hanlderName=='cycle_single_play' || hanlderName=='random_play') {
let newhanlderName=''
//点击的是列表播放 (逻辑应设置为下一个,即列表循环)
if (hanlderName=='list_play') newhanlderName='cycle_list_play'
//点击的是列表循环 (逻辑应设置为下一个,即单曲循环)
if (hanlderName=='cycle_list_play') newhanlderName='cycle_single_play'
//点击的是单曲循环 (逻辑应设置为下一个,即随机播放)
if (hanlderName=='cycle_single_play') newhanlderName='random_play'
//点击的是随机播放 (逻辑应设置为下一个,即列表播放)
if (hanlderName=='random_play') newhanlderName='list_play'
this.changePlayType(newhanlderName)
return
}
// -------------------------------------------
//上一曲,下一曲
if (hanlderName=='prev' || hanlderName=='next') {
this.switchMusic(hanlderName)
}
//播放
if (hanlderName=='play') {
if (app.globalData.backgroundAudioManager.src) {
app.globalData.backgroundAudioManager.play()
}else{
this.createBackgroundAudioManager(this.data.songInfo)
}
this.setData({
isPlay:true
})
return
}
//暂停
if (hanlderName=='pause') {
app.globalData.backgroundAudioManager.pause()
this.setData({
isPlay:false
})
return
}
//列表
if (hanlderName=='list') {
wx.navigateTo({
url: '../musicList/musicList',
})
return
}
},
//上下曲切歌逻辑
switchMusic:function (type) {
let musicList=this.data.musicList,
nowIndex=this.getNowPlayIndex(), | newIndex=null
console.log(musicList);
console.log(nowIndex);
console.log(type);
console.log(playType);
if (musicList.length<=1) {
return
}
//随机播放(点上一曲或者下一曲哪个按钮都没联系,随机切)
if (playType=='random_play') {
//获取一个小于播放列表length的随机数并且不等于当前播放的index
let getRandomNum=function getRandomNumFn(index) {
newIndex=Math.floor(Math.random()*musicList.length)
if (newIndex==index) {
getRandomNumFn(index)
} else {
return newIndex
}
}
getRandomNum(nowIndex)
}
//其他三个是正常的上一曲和下一曲
if (playType=='list_play' || playType=='cycle_list_play' || playType=='cycle_single_play') {
//上一首
if (type=="prev") {
// 如果是第一首,上一首切换到最后一首
if (nowIndex==0) {
newIndex=musicList.length-1
}else{ //正常上一首
newIndex=nowIndex-1
}
}
//下一首
if (type=="next") {
// 如果是最后一首,下一首切换到第一首
if (nowIndex==musicList.length-1) {
newIndex=0
}else{ //正常下一首
newIndex=nowIndex+1
}
}
}
console.log(musicList[nowIndex].title);
console.log(musicList[newIndex].title);
this.createBackgroundAudioManager(musicList[newIndex])
},
//切换播放模式
changePlayType:function (playType) {
let playTypeList=this.data.playTypeList
//设置新的图标显示和播放模式
this.setData({
playTypeList:playTypeList.map(v=>{
return {
...v,
hidden:v.hanlderName==playType?false:true
}
}),
playType:playType
})
//改变自然播放完毕后的逻辑
app.globalData.backgroundAudioManager.onEnded(()=>{
console.log('in');
let nowIndex=this.getNowPlayIndex(),
musicList=this.data.musicList
//列表播放
if (playType=='list_play') {
//如果是最后一首,自然播放完毕后放完直接暂停
if (nowIndex==musicList.length-1) {
app.globalData.backgroundAudioManager.pause()
this.setData({
isPlay:false
})
return
}
//如果不是最后一首,自然播放完毕后调用正常下一首
this.switchMusic('next')
}
//列表循环,随机播放
if (playType=='cycle_list_play' || playType=='random_play') {
//自然播放完毕后调用正常下一首
this.switchMusic('next')
}
//单曲循环
if (playType=='cycle_single_play') {
//自然播放完毕后无需切歌,直接重复当前
this.createBackgroundAudioManager(musicList[nowIndex])
}
})
},
//获取当前播放的歌的index
getNowPlayIndex:function () {
let musicList=this.data.musicList,
result=null
musicList.map((item,index)=>{
if (item.id==this.data.songInfo.id) {
result=index
}
})
return result
},
//创建背景音乐播放器
createBackgroundAudioManager:function (songInfo,isPlay=true) {
app.globalData.backgroundAudioManager.title = songInfo.title
app.globalData.backgroundAudioManager.singer = songInfo.singer
if (isPlay) app.globalData.backgroundAudioManager.src = songInfo.src
if (this.data.isPlay==true || isPlay) {
//监听播放进度
let that=this
app.globalData.backgroundAudioManager.onTimeUpdate(()=>{
this.throttle('timeStart',this.onTimeUpdateThrottle(app.globalData.backgroundAudioManager.duration,app.globalData.backgroundAudioManager.currentTime),that,1000)()
})
}
app.globalData.songInfo=songInfo
this.setData({
songInfo:songInfo,
isPlay:this.data.isPlay==true?true:isPlay
})
},
//获取歌词
getLiric:function () {
// api.getLiric({id:this.data.songInfo.id}).then(res=>{
// console.log(res);
// })
},
//拖动白点
touchmoveFn:function (event) {
let nowClientX=this.data.nowClientX,
newClientX=this.data.newClientX
if (nowClientX!=newClientX) {
this.setData({
nowClientX:newClientX
})
this.nowProgressPosition(newClientX)
}
},
//点击进度条
clickProgressBar:function (event) {
this.nowProgressPosition(event.detail.x)
},
//获取当前触摸点的X轴位置
mainBarTouchmoveFn:function (event) {
this.throttle('timeStart',this.mainBarTouchmoveFnThrottle(event),this,500)()
},
//获取当前触摸点的X轴位置(节流)
mainBarTouchmoveFnThrottle:function (event) {
//因为要节流,所以把具体函数要return返上去,不然节流函数的callback拿不到方法
return function () {
this.setData({
newClientX:event.touches[0].clientX
})
}
},
//跟随播放进度,进度条自动移动
onTimeUpdateThrottle:function (duration,currentTime) {
return function () {
this.setData({
progressBarWidth:(currentTime/duration)*100+'%'
})
}
},
//节流
throttle:function (keyNmae,callback,newThis,delay=1000) {
let timeStart=newThis.data[keyNmae]
return function () {
let timeEnd=Date.now(),
args=arguments
if (timeEnd-timeStart>=delay) {
callback.apply(newThis,args)
newThis.setData({
[keyNmae]:timeEnd
})
}
}
},
//算出进度条现在的位置
nowProgressPosition:function (click_X) {
let progressBarPosition=this.data.progressBarPosition[0]
//点击的X轴-进度条的X轴获得现在红条的宽度,再除以进度条宽度,得到width的百分比宽度
let progressBarWidth=((click_X-progressBarPosition.left)/progressBarPosition.width)
this.setData({
progressBarWidth:progressBarWidth*100+'%'
})
let currentTime=progressBarWidth*app.globalData.backgroundAudioManager.duration
app.globalData.backgroundAudioManager.seek(currentTime)
}
}) | playType=this.data.playType, | random_line_split |
play.js | import api from '../../api/api.js'
let app=getApp()
let backgroundAudioManager = wx.getBackgroundAudioManager()
console.log(backgroundAudioManager.src);
if (backgroundAudioManager.src) {
}
backgroundAudioManager.title = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].title:"Sour Candy"
backgroundAudioManager.singer = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].singer:"Lady Gaga/BLACKPINK - Chromatica"
backgroundAudioManager.coverImgUrl = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].picUrl:"https://p2.music.126.net/hPCvLRx5TxSWul9YY5n6sA==/109951165023441548.jpg"
backgroundAudioManager.src = wx.getStorageSync("musicList")[0]?wx.getStorageSync("musicList")[0].src:"http://m10.music.126.net/20200609181058/081e18703250af15bcb5ae133ce47ee9/ymusic/obj/w5zDlMODwrDDiGjCn8Ky/2687158303/2c0a/fc3f/31bc/9008459c57d35bcd1c0f425b6504a1c1.mp3"
app.globalData.backgroundAudioManager=backgroundAudioManager
app.globalData.backgroundAudioManager.pause()
Page({
/**
* 页面的初始数据
*/
data: {
//当前歌曲信息
songInfo:{},
//播放列表
musicList:[],
//正在播放
isPlay:false,
playType:'list_play',
playTypeList:[
{hanlderName:'list_play',src:'../../assets/images/list_play.png',className:'button-bar-list_play',hidden:false},
{hanlderName:'cycle_list_play',src:'../../assets/images/cycle_list_play.png',className:'button-bar-cycle_list_play',hidden:true},
{hanlderName:'cycle_single_play',src:'../../assets/images/cycle_single_play.png',className:'button-bar-cycle_single_play',hidden:true},
{hanlderName:'random_play',src:'../../assets/images/random_play.png',className:'button-bar-random_play',hidden:true},
],
//节流函数的开始
timeStart:0,
//进度条的位置(红柱的长度)
progressBarWidth:0,
//进度条的节点信息
progressBarPosition:[],
nowClientX:0,
newClientX:0
},
/**
* 生命周期函数--监听页面加载
*/
onLoad: function (options) {
this.setData({
songInfo:app.globalData.songInfo,
musicList:app.globalData.musicList,
})
app.globalData.backgroundAudioManager.pause()
this.changePlayType('list_play')
},
/**
* 生命周期函数--监听页面初次渲染完成
*/
onReady: function () {
let that=this
const query = wx.createSelectorQuery()
query.select('#progressBar').boundingClientRect()
query.selectViewport().scrollOffset()
query.exec(function(res){
that.setData({
progressBarPosition:res
})
})
},
/**
* 生命周期函数--监听页面显示
*/
onShow: function () {
if (app.globalData.id) {
let ids=this.data.musicList.map(v=>v.id)
//有ID,并且是新的ID
if (ids.indexOf(app.globalData.id)==-1) {
this.createPlayerFn(app.globalData.id)
}
//是已经有的ID,
if (ids.indexOf(app.globalData.id)!=-1) {
// 并且不是当前歌曲,做切歌操作
if (app.globalData.id!=this.data.songInfo.id) {
this.createBackgroundAudioManager(this.data.musicList[ids.indexOf(app.globalData.id)])
}else{ //就是当前歌曲,不做操作
return
}
}
}else{//刚进入没选歌,从下方Bar点进来
if (app.globalData.backgroundAudioManager.src) {
return
}
if (this.data.musicList.length>0) {
//创建播放器
this.createBackgroundAudioManager(this.data.musicList[0],false)
}else{
wx.showToast({
title: '播放列表为空,请添加歌曲',
icon: 'none',
image: '',
duration: 1500,
mask: false,
success: (result)=>{
// wx.switchTab({
// url: "../discover/discover",
// })
},
fail: ()=>{},
complete: ()=>{}
});
}
}
},
/**
* 生命周期函数--监听页面隐藏
*/
onHide: function () {
},
/**
* 生命周期函数--监听页面卸载
*/
onUnload: function () {
},
/**
* 页面相关事件处理函数--监听用户下拉动作
*/
onPullDownRefresh: function () {
},
/**
* 页面上拉触底事件的处理函数
*/
onReachBottom: function () {
},
/**
* 用户点击右上角分享
*/
onShareAppMessage: function () {
},
//请求播放地址和歌曲详情
createPlayerFn:async function (id) {
let resPlayURL=await api.getPlayURL({id:id}),//先拿到播放地址
resSongInfo=await api.getPlayDtail({ids:id}) //再拿歌曲详情(拿封面)
if (resPlayURL.code==200 && resSongInfo.code==200) {
//当前歌曲播放信息
let singer=''
//歌手可能多个
resSongInfo.songs[0].ar.map((item,index)=>{
singer+=index==resSongInfo.songs[0].ar.length-1?item.name:item.name+'/'
})
let songInfo={
id:id,
title:resSongInfo.songs[0].name,
singer:singer+' - '+resSongInfo.songs[0].al.name,//歌手/歌手/歌手-专辑
src:resPlayURL.data[0].url,
picUrl:resSongInfo.songs[0].al.picUrl,
songTime_ms:resSongInfo.songs[0].dt,//单位毫秒
songTime_s:resSongInfo.songs[0].dt/1000,//单位毫秒
}
//创建播放器
this.createBackgroundAudioManager(songInfo)
app.globalData.musicList.push(songInfo)
this.setData({
songInfo:songInfo,
musicList:app.globalData.musicList,
isPlay:true
})
wx.setStorageSync("musicList",this.data.musicList)
}else{
}
},
//控制播放
playControlFn:function (event) {
let hanlderName=event.currentTarget.dataset.hanldername
//----------------------第一个图标 调整播放模式--------------------
if (hanlderName=='list_play' || hanlderName=='cycle_list_play' || hanlderName=='cycle_single_play' || hanlderName=='random_play') {
let newhanlderName=''
//点击的是列表播放 (逻辑应设置为下一个,即列表循环)
if (hanlderName=='list_play') newhanlderName='cycle_list_play'
//点击的是列表循环 (逻辑应设置为下一个,即单曲循环)
if (hanlderName=='cycle_list_play') newhanlderName='cycle_single_play'
//点击的是单曲循环 (逻辑应设置为下一个,即随机播放)
if (hanlderName=='cycle_single_play') newhanlderName='random_play'
//点击的是随机播放 (逻辑应设置为下一个,即列表播放)
if (hanlderName=='random_play') newhanlderName='list_play'
this.changePlayType(newhanlderName)
return
}
// -------------------------------------------
//上一曲,下一曲
if (hanlderName=='prev' || hanlderName=='next') {
this.switchMusic(hanlderName)
}
//播放
if (hanlderName=='play') {
if (app.globalData.backgroundAudioManager.src) {
app.globalData.backgroundAudioManager.play()
}else{
this.createBackgroundAudioManager(this.data.songInfo)
}
this.setData({
isPlay:true
})
return
}
//暂停
if (hanlderName=='pause') {
app.globalData.backgroundAudioManager.pause()
this.setData({
isPlay:false
})
return
}
//列表
if (hanlderName=='list') {
wx.navigateTo({
url: '../musicList/musicList',
})
return
}
},
//上下曲切歌逻辑
switchMusic:function (type) {
let musicList=this.data.musicList,
nowIndex=this.getNowPlayIndex(),
playType=this.data.playType,
newIndex=null
console.log(musicList);
console.log(nowIndex);
console.log(type);
console.log(playType);
if (musicList.length<=1) {
return
}
//随机播放(点上一曲或者下一曲哪个按钮都没联系,随机切)
if (playType=='random_play') {
//获取一个小于播放列表length的随机数并且不等于当前播放的index
let getRandomNum=function getRandomNumFn(index) {
newIndex=Math.floor(Math.random()*musicList.length)
if (newIndex==index) {
getRandomNumFn(index)
} else {
return newIndex
}
}
getRandomNum(nowIndex)
}
//其他三个是正常的上一曲和下一曲
if (playType=='list_play' || playType=='cycle_list_play' || playType=='cycle_single_play') {
//上一首
if (type=="prev") {
// 如果是第一首,上一首切换到最后一首
if (nowIndex==0) {
newIndex=musicList.length-1
}else{ //正常上一首
newIndex=nowIndex-1
}
}
//下一首
if (type=="next") {
// 如果是最后一首,下一首切换到第一首
if (nowIndex==musicList.length-1) {
newIndex=0
}else{ //正常下一首
newIndex=nowIndex+1
}
}
}
console.log(musicList[nowIndex].title);
console.log(musicList[newIndex].title);
this.createBackgroundAudioManager(musicList[newIndex])
},
//切换播放模式
changePlayType:function (playType) {
let playTypeList=this.data.playTypeList
//设置新的图标显示和播放模式
this.setD | =>{
return {
...v,
hidden:v.hanlderName==playType?false:true
}
}),
playType:playType
})
//改变自然播放完毕后的逻辑
app.globalData.backgroundAudioManager.onEnded(()=>{
console.log('in');
let nowIndex=this.getNowPlayIndex(),
musicList=this.data.musicList
//列表播放
if (playType=='list_play') {
//如果是最后一首,自然播放完毕后放完直接暂停
if (nowIndex==musicList.length-1) {
app.globalData.backgroundAudioManager.pause()
this.setData({
isPlay:false
})
return
}
//如果不是最后一首,自然播放完毕后调用正常下一首
this.switchMusic('next')
}
//列表循环,随机播放
if (playType=='cycle_list_play' || playType=='random_play') {
//自然播放完毕后调用正常下一首
this.switchMusic('next')
}
//单曲循环
if (playType=='cycle_single_play') {
//自然播放完毕后无需切歌,直接重复当前
this.createBackgroundAudioManager(musicList[nowIndex])
}
})
},
//获取当前播放的歌的index
getNowPlayIndex:function () {
let musicList=this.data.musicList,
result=null
musicList.map((item,index)=>{
if (item.id==this.data.songInfo.id) {
result=index
}
})
return result
},
//创建背景音乐播放器
createBackgroundAudioManager:function (songInfo,isPlay=true) {
app.globalData.backgroundAudioManager.title = songInfo.title
app.globalData.backgroundAudioManager.singer = songInfo.singer
if (isPlay) app.globalData.backgroundAudioManager.src = songInfo.src
if (this.data.isPlay==true || isPlay) {
//监听播放进度
let that=this
app.globalData.backgroundAudioManager.onTimeUpdate(()=>{
this.throttle('timeStart',this.onTimeUpdateThrottle(app.globalData.backgroundAudioManager.duration,app.globalData.backgroundAudioManager.currentTime),that,1000)()
})
}
app.globalData.songInfo=songInfo
this.setData({
songInfo:songInfo,
isPlay:this.data.isPlay==true?true:isPlay
})
},
//获取歌词
getLiric:function () {
// api.getLiric({id:this.data.songInfo.id}).then(res=>{
// console.log(res);
// })
},
//拖动白点
touchmoveFn:function (event) {
let nowClientX=this.data.nowClientX,
newClientX=this.data.newClientX
if (nowClientX!=newClientX) {
this.setData({
nowClientX:newClientX
})
this.nowProgressPosition(newClientX)
}
},
//点击进度条
clickProgressBar:function (event) {
this.nowProgressPosition(event.detail.x)
},
//获取当前触摸点的X轴位置
mainBarTouchmoveFn:function (event) {
this.throttle('timeStart',this.mainBarTouchmoveFnThrottle(event),this,500)()
},
//获取当前触摸点的X轴位置(节流)
mainBarTouchmoveFnThrottle:function (event) {
//因为要节流,所以把具体函数要return返上去,不然节流函数的callback拿不到方法
return function () {
this.setData({
newClientX:event.touches[0].clientX
})
}
},
//跟随播放进度,进度条自动移动
onTimeUpdateThrottle:function (duration,currentTime) {
return function () {
this.setData({
progressBarWidth:(currentTime/duration)*100+'%'
})
}
},
//节流
throttle:function (keyNmae,callback,newThis,delay=1000) {
let timeStart=newThis.data[keyNmae]
return function () {
let timeEnd=Date.now(),
args=arguments
if (timeEnd-timeStart>=delay) {
callback.apply(newThis,args)
newThis.setData({
[keyNmae]:timeEnd
})
}
}
},
//算出进度条现在的位置
nowProgressPosition:function (click_X) {
let progressBarPosition=this.data.progressBarPosition[0]
//点击的X轴-进度条的X轴获得现在红条的宽度,再除以进度条宽度,得到width的百分比宽度
let progressBarWidth=((click_X-progressBarPosition.left)/progressBarPosition.width)
this.setData({
progressBarWidth:progressBarWidth*100+'%'
})
let currentTime=progressBarWidth*app.globalData.backgroundAudioManager.duration
app.globalData.backgroundAudioManager.seek(currentTime)
}
}) | ata({
playTypeList:playTypeList.map(v | conditional_block |
controller.go | // Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package controller provides a Kubernetes controller for a Caffe2 job resource.
package controller
import (
"errors"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
k8sinformers "k8s.io/client-go/informers"
clientv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
api "github.com/kubeflow/caffe2-operator/pkg/apis/caffe2/v1alpha1"
jobclient "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned"
kubeflowscheme "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned/scheme"
informers "github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions"
"github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions/kubeflow/v1alpha1"
listers "github.com/kubeflow/caffe2-operator/pkg/client/listers/kubeflow/v1alpha1"
)
const (
controllerName = "caffe2-operator"
// labels for pods and servers.
caffe2ReplicaTypeLabel = "caffe2_replica_type"
caffe2ReplicaIndexLabel = "caffe2_replica_index"
)
var (
ErrVersionOutdated = errors.New("requested version is outdated in apiserver")
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function but it should be just fine for non delete events.
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
// DefaultJobBackOff is the max backoff period, exported for the e2e test
DefaultJobBackOff = 10 * time.Second
// MaxJobBackOff is the max backoff period, exported for the e2e test
MaxJobBackOff = 360 * time.Second
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = api.SchemeGroupVersion.WithKind("Caffe2Job")
var groupVersionKind = schema.GroupVersionKind{
Group: api.GroupName,
Version: api.GroupVersion,
Kind: api.Caffe2JobResourceKind,
}
type ControllerConfiguration struct {
ReconcilerSyncLoopPeriod metav1.Duration
}
// DefaultCaffe2JobControllerConfiguration is the suggested caffe2-operator configuration for production.
var DefaultCaffe2JobControllerConfiguration ControllerConfiguration = ControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 15 * time.Second},
}
type Controller struct {
config ControllerConfiguration
// podControl is used to add or delete pods.
podControl PodControlInterface
// serviceControl is used to add or delete services.
serviceControl ServiceControlInterface
// kubeClient is a standard kubernetes clientset.
kubeClient kubernetes.Interface
// caffe2JobClientSet is a clientset for CRD Caffe2Job.
caffe2JobClient jobclient.Interface
// caffe2JobLister can list/get caffe2jobs from the shared informer's store.
caffe2JobLister listers.Caffe2JobLister
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// serviceLister can list/get services from the shared informer's store.
serviceLister corelisters.ServiceLister
podInformer clientv1.PodInformer
caffe2JobInformer v1alpha1.Caffe2JobInformer
// returns true if the caffe2job store has been synced at least once.
caffe2JobSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
podListerSynced cache.InformerSynced
// serviceListerSynced returns true if the service store has been synced at least once.
serviceListerSynced cache.InformerSynced
// WorkQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// To allow injection of syncCaffe2Job for testing.
syncHandler func(jobKey string) (bool, error)
// To allow injection of updateStatus for testing.
updateStatusHandler func(job *api.Caffe2Job) error
}
func New(kubeClient kubernetes.Interface, caffe2JobClient jobclient.Interface) (*Controller, error) {
kubeflowscheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podControl := RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
serviceControl := RealServiceControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
controller := &Controller{
podControl: podControl,
serviceControl: serviceControl,
kubeClient: kubeClient,
caffe2JobClient: caffe2JobClient,
workQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Caffe2jobs"),
recorder: recorder,
}
caffe2JobInformerFactory := informers.NewSharedInformerFactory(caffe2JobClient, time.Second*30)
podInformerFactory := k8sinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
controller.caffe2JobInformer = caffe2JobInformerFactory.Kubeflow().V1alpha1().Caffe2Jobs()
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
controller.caffe2JobInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *api.Caffe2Job:
glog.V(4).Infof("filter caffe2job name: %v", t.Name)
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addCaffe2Job,
UpdateFunc: controller.updateCaffe2Job,
DeleteFunc: controller.enqueueController,
},
})
controller.caffe2JobLister = controller.caffe2JobInformer.Lister()
controller.caffe2JobSynced = controller.caffe2JobInformer.Informer().HasSynced
// create informer for pod information
controller.podInformer = podInformerFactory.Core().V1().Pods()
controller.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch obj.(type) {
case *v1.Pod:
pod := obj.(*v1.Pod)
if _, ok := pod.Labels["caffe2_job_key"]; !ok {
return false
}
return pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addPod,
UpdateFunc: controller.updatePod,
DeleteFunc: controller.deletePod,
},
})
controller.podLister = controller.podInformer.Lister()
controller.podListerSynced = controller.podInformer.Informer().HasSynced
controller.syncHandler = controller.syncCaffe2Job
controller.updateStatusHandler = controller.updateCaffe2JobStatus
return controller, nil
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workQueue.ShutDown()
go c.podInformer.Informer().Run(stopCh)
go c.caffe2JobInformer.Informer().Run(stopCh)
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Caffe2Job controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
glog.V(4).Info("Sync caffe2jobs...")
if ok := cache.WaitForCacheSync(stopCh, c.caffe2JobSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.V(4).Info("Sync pods...")
if ok := cache.WaitForCacheSync(stopCh, c.podListerSynced); !ok {
return fmt.Errorf("failed to wait for pod caches to sync")
}
glog.Infof("Starting %v workers", threadiness)
// Launch workers to process Caffe2Job resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget |
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true indicating that its ok to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name) == 0 {
return false, fmt.Errorf("invalid job key %q: either namespace or name is missing", key)
}
job, err := c.caffe2JobLister.Caffe2Jobs(ns).Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
glog.V(4).Infof("Job has been deleted: %v", key)
return true, nil
}
return false, err
}
glog.Infof("Caffe2Jobs: %#v", job)
var reconcileCaffe2JobsErr error
if job.DeletionTimestamp == nil {
reconcileCaffe2JobsErr = c.reconcileCaffe2Jobs(job)
}
if reconcileCaffe2JobsErr != nil {
return false, reconcileCaffe2JobsErr
}
return true, err
}
// obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item.
func (c *Controller) enqueueController(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.workQueue.AddRateLimited(key)
}
// reconcileCaffe2Jobs checks and updates replicas for each given Caffe2ReplicaSpec.
// It will requeue the caffe2job in case of an error while creating/deleting pods/services.
func (c *Controller) reconcileCaffe2Jobs(job *api.Caffe2Job) error {
glog.Infof("Reconcile Caffe2Jobs %s", job.Name)
pods, err := c.getPodsForCaffe2Job(job)
if err != nil {
glog.Infof("getPodsForCaffe2Job error %v", err)
return err
}
glog.V(4).Infof("Pods is %#v", pods)
/* TODO services
services, err := c.getServicesForCaffe2Job(job)
if err != nil {
glog.Infof("getServicesForCaffe2Job error %v", err)
return err
}
*/
// Diff current active pods/services with replicas.
spec := job.Spec.ReplicaSpecs
err = c.reconcilePods(job, pods, spec)
if err != nil {
glog.Infof("reconcilePods error %v", err)
return err
}
/*
err = c.reconcileServices(job, services, rtype, spec)
if err != nil {
glog.Infof("reconcileServices error %v", err)
return err
}
*/
// TODO: Add check here, no need to update the caffe2job if the status hasn't changed since last time.
return c.updateStatusHandler(job)
}
func genLabels(id, jobKey string) map[string]string {
return map[string]string{
"group_name": api.GroupName,
"caffe2_job_key": strings.Replace(jobKey, "/", "-", -1),
"runtime_id": id,
}
}
// When a pod is added, set the defaults and enqueue the current caffe2job.
func (c *Controller) addCaffe2Job(obj interface{}) {
job := obj.(*api.Caffe2Job)
msg := fmt.Sprintf("Caffe2Job %s is created.", job.Name)
glog.Info(msg)
scheme.Scheme.Default(job)
// Leave a created condition.
err := c.updateCaffe2JobConditions(job, api.Caffe2JobCreated, caffe2JobCreatedReason, msg)
if err != nil {
glog.Errorf("Append caffe2job condition error: %v", err)
return
}
c.enqueueController(obj)
}
// When a pod is updated, enqueue the current caffe2job.
func (c *Controller) updateCaffe2Job(old, cur interface{}) {
oldCaffe2Job := old.(*api.Caffe2Job)
glog.Infof("Updating caffe2job: %s", oldCaffe2Job.Name)
c.enqueueController(cur)
}
func (c *Controller) updateCaffe2JobStatus(job *api.Caffe2Job) error {
_, err := c.caffe2JobClient.KubeflowV1alpha1().Caffe2Jobs(job.Namespace).Update(job)
return err
}
func (c *Controller) updateCaffe2JobConditions(job *api.Caffe2Job, conditionType api.Caffe2JobConditionType, reason, message string) error {
condition := newCondition(conditionType, reason, message)
setCondition(&job.Status, condition)
return nil
}
// resolveControllerRef returns the tfjob referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching tfjob
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *api.Caffe2Job {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
return nil
}
job, err := c.caffe2JobLister.Caffe2Jobs(namespace).Get(controllerRef.Name)
if err != nil {
return nil
}
if job.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return job
}
func genOwnerReference(job *api.Caffe2Job) *metav1.OwnerReference {
boolPtr := func(b bool) *bool { return &b }
controllerRef := &metav1.OwnerReference{
APIVersion: groupVersionKind.GroupVersion().String(),
Kind: groupVersionKind.Kind,
Name: job.Name,
UID: job.UID,
BlockOwnerDeletion: boolPtr(true),
Controller: boolPtr(true),
}
return controllerRef
}
// newCondition creates a new caffe2job condition.
func newCondition(conditionType api.Caffe2JobConditionType, reason, message string) api.Caffe2JobCondition {
return api.Caffe2JobCondition{
Type: conditionType,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// getCondition returns the condition with the provided type.
func getCondition(status api.Caffe2JobStatus, condType api.Caffe2JobConditionType) *api.Caffe2JobCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// setCondition updates the caffe2job to include the provided condition.
// If the condition that we are about to add already exists
// and has the same status and reason then we are not going to update.
func setCondition(status *api.Caffe2JobStatus, condition api.Caffe2JobCondition) {
currentCond := getCondition(*status, condition.Type)
// Do nothing if condition doesn't change
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
// Append the updated condition to the
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
// removeCondition removes the caffe2job condition with the provided type.
func removementCondition(status *api.Caffe2JobStatus, condType api.Caffe2JobConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of caffe2job conditions without conditions with the provided type.
func filterOutCondition(conditions []api.Caffe2JobCondition, condType api.Caffe2JobConditionType) []api.Caffe2JobCondition {
var newConditions []api.Caffe2JobCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}
| {
c.workQueue.Forget(key)
} | conditional_block |
controller.go | // Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package controller provides a Kubernetes controller for a Caffe2 job resource.
package controller
import (
"errors"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
k8sinformers "k8s.io/client-go/informers"
clientv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
api "github.com/kubeflow/caffe2-operator/pkg/apis/caffe2/v1alpha1"
jobclient "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned"
kubeflowscheme "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned/scheme"
informers "github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions"
"github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions/kubeflow/v1alpha1"
listers "github.com/kubeflow/caffe2-operator/pkg/client/listers/kubeflow/v1alpha1"
)
const (
controllerName = "caffe2-operator"
// labels for pods and servers.
caffe2ReplicaTypeLabel = "caffe2_replica_type"
caffe2ReplicaIndexLabel = "caffe2_replica_index"
)
var (
ErrVersionOutdated = errors.New("requested version is outdated in apiserver")
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function but it should be just fine for non delete events.
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
// DefaultJobBackOff is the max backoff period, exported for the e2e test
DefaultJobBackOff = 10 * time.Second
// MaxJobBackOff is the max backoff period, exported for the e2e test
MaxJobBackOff = 360 * time.Second
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = api.SchemeGroupVersion.WithKind("Caffe2Job")
var groupVersionKind = schema.GroupVersionKind{
Group: api.GroupName,
Version: api.GroupVersion,
Kind: api.Caffe2JobResourceKind,
}
type ControllerConfiguration struct {
ReconcilerSyncLoopPeriod metav1.Duration
}
// DefaultCaffe2JobControllerConfiguration is the suggested caffe2-operator configuration for production.
var DefaultCaffe2JobControllerConfiguration ControllerConfiguration = ControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 15 * time.Second},
}
type Controller struct {
config ControllerConfiguration
// podControl is used to add or delete pods.
podControl PodControlInterface
// serviceControl is used to add or delete services.
serviceControl ServiceControlInterface
// kubeClient is a standard kubernetes clientset.
kubeClient kubernetes.Interface
// caffe2JobClientSet is a clientset for CRD Caffe2Job.
caffe2JobClient jobclient.Interface
// caffe2JobLister can list/get caffe2jobs from the shared informer's store.
caffe2JobLister listers.Caffe2JobLister
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// serviceLister can list/get services from the shared informer's store.
serviceLister corelisters.ServiceLister
podInformer clientv1.PodInformer
caffe2JobInformer v1alpha1.Caffe2JobInformer
// returns true if the caffe2job store has been synced at least once.
caffe2JobSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
podListerSynced cache.InformerSynced
// serviceListerSynced returns true if the service store has been synced at least once.
serviceListerSynced cache.InformerSynced
// WorkQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// To allow injection of syncCaffe2Job for testing.
syncHandler func(jobKey string) (bool, error)
// To allow injection of updateStatus for testing.
updateStatusHandler func(job *api.Caffe2Job) error
}
func New(kubeClient kubernetes.Interface, caffe2JobClient jobclient.Interface) (*Controller, error) {
kubeflowscheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podControl := RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
serviceControl := RealServiceControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
controller := &Controller{
podControl: podControl,
serviceControl: serviceControl,
kubeClient: kubeClient,
caffe2JobClient: caffe2JobClient,
workQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Caffe2jobs"),
recorder: recorder,
}
caffe2JobInformerFactory := informers.NewSharedInformerFactory(caffe2JobClient, time.Second*30)
podInformerFactory := k8sinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
controller.caffe2JobInformer = caffe2JobInformerFactory.Kubeflow().V1alpha1().Caffe2Jobs()
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
controller.caffe2JobInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *api.Caffe2Job:
glog.V(4).Infof("filter caffe2job name: %v", t.Name)
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addCaffe2Job,
UpdateFunc: controller.updateCaffe2Job,
DeleteFunc: controller.enqueueController,
},
})
controller.caffe2JobLister = controller.caffe2JobInformer.Lister()
controller.caffe2JobSynced = controller.caffe2JobInformer.Informer().HasSynced
// create informer for pod information
controller.podInformer = podInformerFactory.Core().V1().Pods()
controller.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch obj.(type) {
case *v1.Pod:
pod := obj.(*v1.Pod)
if _, ok := pod.Labels["caffe2_job_key"]; !ok {
return false
}
return pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addPod,
UpdateFunc: controller.updatePod,
DeleteFunc: controller.deletePod,
},
})
controller.podLister = controller.podInformer.Lister()
controller.podListerSynced = controller.podInformer.Informer().HasSynced
controller.syncHandler = controller.syncCaffe2Job
controller.updateStatusHandler = controller.updateCaffe2JobStatus
return controller, nil
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workQueue.ShutDown()
go c.podInformer.Informer().Run(stopCh)
go c.caffe2JobInformer.Informer().Run(stopCh)
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Caffe2Job controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
glog.V(4).Info("Sync caffe2jobs...")
if ok := cache.WaitForCacheSync(stopCh, c.caffe2JobSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.V(4).Info("Sync pods...")
if ok := cache.WaitForCacheSync(stopCh, c.podListerSynced); !ok {
return fmt.Errorf("failed to wait for pod caches to sync")
}
glog.Infof("Starting %v workers", threadiness)
// Launch workers to process Caffe2Job resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget {
c.workQueue.Forget(key)
}
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true indicating that its ok to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name) == 0 {
return false, fmt.Errorf("invalid job key %q: either namespace or name is missing", key)
}
job, err := c.caffe2JobLister.Caffe2Jobs(ns).Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
glog.V(4).Infof("Job has been deleted: %v", key)
return true, nil
}
return false, err
}
glog.Infof("Caffe2Jobs: %#v", job)
var reconcileCaffe2JobsErr error
if job.DeletionTimestamp == nil {
reconcileCaffe2JobsErr = c.reconcileCaffe2Jobs(job)
}
if reconcileCaffe2JobsErr != nil {
return false, reconcileCaffe2JobsErr
}
return true, err
}
// obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item.
func (c *Controller) enqueueController(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.workQueue.AddRateLimited(key)
}
// reconcileCaffe2Jobs checks and updates replicas for each given Caffe2ReplicaSpec.
// It will requeue the caffe2job in case of an error while creating/deleting pods/services.
func (c *Controller) reconcileCaffe2Jobs(job *api.Caffe2Job) error {
glog.Infof("Reconcile Caffe2Jobs %s", job.Name)
pods, err := c.getPodsForCaffe2Job(job)
if err != nil {
glog.Infof("getPodsForCaffe2Job error %v", err)
return err
}
glog.V(4).Infof("Pods is %#v", pods)
/* TODO services
services, err := c.getServicesForCaffe2Job(job)
if err != nil {
glog.Infof("getServicesForCaffe2Job error %v", err)
return err
}
*/
// Diff current active pods/services with replicas.
spec := job.Spec.ReplicaSpecs
err = c.reconcilePods(job, pods, spec)
if err != nil {
glog.Infof("reconcilePods error %v", err)
return err
}
/*
err = c.reconcileServices(job, services, rtype, spec)
if err != nil {
glog.Infof("reconcileServices error %v", err)
return err
}
*/
// TODO: Add check here, no need to update the caffe2job if the status hasn't changed since last time.
return c.updateStatusHandler(job)
}
func genLabels(id, jobKey string) map[string]string {
return map[string]string{
"group_name": api.GroupName,
"caffe2_job_key": strings.Replace(jobKey, "/", "-", -1),
"runtime_id": id,
}
}
// When a pod is added, set the defaults and enqueue the current caffe2job.
func (c *Controller) addCaffe2Job(obj interface{}) {
job := obj.(*api.Caffe2Job)
msg := fmt.Sprintf("Caffe2Job %s is created.", job.Name)
glog.Info(msg)
scheme.Scheme.Default(job)
// Leave a created condition.
err := c.updateCaffe2JobConditions(job, api.Caffe2JobCreated, caffe2JobCreatedReason, msg)
if err != nil {
glog.Errorf("Append caffe2job condition error: %v", err)
return
}
c.enqueueController(obj)
}
// When a pod is updated, enqueue the current caffe2job.
func (c *Controller) updateCaffe2Job(old, cur interface{}) {
oldCaffe2Job := old.(*api.Caffe2Job)
glog.Infof("Updating caffe2job: %s", oldCaffe2Job.Name)
c.enqueueController(cur)
}
func (c *Controller) updateCaffe2JobStatus(job *api.Caffe2Job) error {
_, err := c.caffe2JobClient.KubeflowV1alpha1().Caffe2Jobs(job.Namespace).Update(job)
return err
}
func (c *Controller) updateCaffe2JobConditions(job *api.Caffe2Job, conditionType api.Caffe2JobConditionType, reason, message string) error {
condition := newCondition(conditionType, reason, message)
setCondition(&job.Status, condition)
return nil
}
// resolveControllerRef returns the tfjob referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching tfjob
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *api.Caffe2Job {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
return nil
}
job, err := c.caffe2JobLister.Caffe2Jobs(namespace).Get(controllerRef.Name)
if err != nil {
return nil
}
if job.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return job
}
func genOwnerReference(job *api.Caffe2Job) *metav1.OwnerReference {
boolPtr := func(b bool) *bool { return &b }
controllerRef := &metav1.OwnerReference{
APIVersion: groupVersionKind.GroupVersion().String(),
Kind: groupVersionKind.Kind,
Name: job.Name,
UID: job.UID,
BlockOwnerDeletion: boolPtr(true),
Controller: boolPtr(true),
}
return controllerRef
}
// newCondition creates a new caffe2job condition.
func newCondition(conditionType api.Caffe2JobConditionType, reason, message string) api.Caffe2JobCondition {
return api.Caffe2JobCondition{
Type: conditionType,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// getCondition returns the condition with the provided type.
func getCondition(status api.Caffe2JobStatus, condType api.Caffe2JobConditionType) *api.Caffe2JobCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// setCondition updates the caffe2job to include the provided condition.
// If the condition that we are about to add already exists
// and has the same status and reason then we are not going to update.
func setCondition(status *api.Caffe2JobStatus, condition api.Caffe2JobCondition) {
currentCond := getCondition(*status, condition.Type)
// Do nothing if condition doesn't change
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
// Append the updated condition to the
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
// removeCondition removes the caffe2job condition with the provided type.
func removementCondition(status *api.Caffe2JobStatus, condType api.Caffe2JobConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of caffe2job conditions without conditions with the provided type.
func filterOutCondition(conditions []api.Caffe2JobCondition, condType api.Caffe2JobConditionType) []api.Caffe2JobCondition | {
var newConditions []api.Caffe2JobCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
} | identifier_body | |
controller.go | // Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package controller provides a Kubernetes controller for a Caffe2 job resource.
package controller
import (
"errors"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
k8sinformers "k8s.io/client-go/informers"
clientv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
api "github.com/kubeflow/caffe2-operator/pkg/apis/caffe2/v1alpha1"
jobclient "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned"
kubeflowscheme "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned/scheme"
informers "github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions"
"github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions/kubeflow/v1alpha1"
listers "github.com/kubeflow/caffe2-operator/pkg/client/listers/kubeflow/v1alpha1"
)
const (
controllerName = "caffe2-operator"
// labels for pods and servers.
caffe2ReplicaTypeLabel = "caffe2_replica_type"
caffe2ReplicaIndexLabel = "caffe2_replica_index"
)
var (
ErrVersionOutdated = errors.New("requested version is outdated in apiserver")
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function but it should be just fine for non delete events.
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
// DefaultJobBackOff is the max backoff period, exported for the e2e test
DefaultJobBackOff = 10 * time.Second
// MaxJobBackOff is the max backoff period, exported for the e2e test
MaxJobBackOff = 360 * time.Second
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = api.SchemeGroupVersion.WithKind("Caffe2Job")
var groupVersionKind = schema.GroupVersionKind{
Group: api.GroupName,
Version: api.GroupVersion,
Kind: api.Caffe2JobResourceKind,
}
type ControllerConfiguration struct {
ReconcilerSyncLoopPeriod metav1.Duration
}
// DefaultCaffe2JobControllerConfiguration is the suggested caffe2-operator configuration for production.
var DefaultCaffe2JobControllerConfiguration ControllerConfiguration = ControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 15 * time.Second},
}
type Controller struct {
config ControllerConfiguration
// podControl is used to add or delete pods.
podControl PodControlInterface
// serviceControl is used to add or delete services.
serviceControl ServiceControlInterface
// kubeClient is a standard kubernetes clientset.
kubeClient kubernetes.Interface | // caffe2JobLister can list/get caffe2jobs from the shared informer's store.
caffe2JobLister listers.Caffe2JobLister
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// serviceLister can list/get services from the shared informer's store.
serviceLister corelisters.ServiceLister
podInformer clientv1.PodInformer
caffe2JobInformer v1alpha1.Caffe2JobInformer
// returns true if the caffe2job store has been synced at least once.
caffe2JobSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
podListerSynced cache.InformerSynced
// serviceListerSynced returns true if the service store has been synced at least once.
serviceListerSynced cache.InformerSynced
// WorkQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// To allow injection of syncCaffe2Job for testing.
syncHandler func(jobKey string) (bool, error)
// To allow injection of updateStatus for testing.
updateStatusHandler func(job *api.Caffe2Job) error
}
func New(kubeClient kubernetes.Interface, caffe2JobClient jobclient.Interface) (*Controller, error) {
kubeflowscheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podControl := RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
serviceControl := RealServiceControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
controller := &Controller{
podControl: podControl,
serviceControl: serviceControl,
kubeClient: kubeClient,
caffe2JobClient: caffe2JobClient,
workQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Caffe2jobs"),
recorder: recorder,
}
caffe2JobInformerFactory := informers.NewSharedInformerFactory(caffe2JobClient, time.Second*30)
podInformerFactory := k8sinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
controller.caffe2JobInformer = caffe2JobInformerFactory.Kubeflow().V1alpha1().Caffe2Jobs()
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
controller.caffe2JobInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *api.Caffe2Job:
glog.V(4).Infof("filter caffe2job name: %v", t.Name)
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addCaffe2Job,
UpdateFunc: controller.updateCaffe2Job,
DeleteFunc: controller.enqueueController,
},
})
controller.caffe2JobLister = controller.caffe2JobInformer.Lister()
controller.caffe2JobSynced = controller.caffe2JobInformer.Informer().HasSynced
// create informer for pod information
controller.podInformer = podInformerFactory.Core().V1().Pods()
controller.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch obj.(type) {
case *v1.Pod:
pod := obj.(*v1.Pod)
if _, ok := pod.Labels["caffe2_job_key"]; !ok {
return false
}
return pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addPod,
UpdateFunc: controller.updatePod,
DeleteFunc: controller.deletePod,
},
})
controller.podLister = controller.podInformer.Lister()
controller.podListerSynced = controller.podInformer.Informer().HasSynced
controller.syncHandler = controller.syncCaffe2Job
controller.updateStatusHandler = controller.updateCaffe2JobStatus
return controller, nil
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workQueue.ShutDown()
go c.podInformer.Informer().Run(stopCh)
go c.caffe2JobInformer.Informer().Run(stopCh)
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Caffe2Job controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
glog.V(4).Info("Sync caffe2jobs...")
if ok := cache.WaitForCacheSync(stopCh, c.caffe2JobSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.V(4).Info("Sync pods...")
if ok := cache.WaitForCacheSync(stopCh, c.podListerSynced); !ok {
return fmt.Errorf("failed to wait for pod caches to sync")
}
glog.Infof("Starting %v workers", threadiness)
// Launch workers to process Caffe2Job resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget {
c.workQueue.Forget(key)
}
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true indicating that its ok to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name) == 0 {
return false, fmt.Errorf("invalid job key %q: either namespace or name is missing", key)
}
job, err := c.caffe2JobLister.Caffe2Jobs(ns).Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
glog.V(4).Infof("Job has been deleted: %v", key)
return true, nil
}
return false, err
}
glog.Infof("Caffe2Jobs: %#v", job)
var reconcileCaffe2JobsErr error
if job.DeletionTimestamp == nil {
reconcileCaffe2JobsErr = c.reconcileCaffe2Jobs(job)
}
if reconcileCaffe2JobsErr != nil {
return false, reconcileCaffe2JobsErr
}
return true, err
}
// obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item.
func (c *Controller) enqueueController(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.workQueue.AddRateLimited(key)
}
// reconcileCaffe2Jobs checks and updates replicas for each given Caffe2ReplicaSpec.
// It will requeue the caffe2job in case of an error while creating/deleting pods/services.
func (c *Controller) reconcileCaffe2Jobs(job *api.Caffe2Job) error {
glog.Infof("Reconcile Caffe2Jobs %s", job.Name)
pods, err := c.getPodsForCaffe2Job(job)
if err != nil {
glog.Infof("getPodsForCaffe2Job error %v", err)
return err
}
glog.V(4).Infof("Pods is %#v", pods)
/* TODO services
services, err := c.getServicesForCaffe2Job(job)
if err != nil {
glog.Infof("getServicesForCaffe2Job error %v", err)
return err
}
*/
// Diff current active pods/services with replicas.
spec := job.Spec.ReplicaSpecs
err = c.reconcilePods(job, pods, spec)
if err != nil {
glog.Infof("reconcilePods error %v", err)
return err
}
/*
err = c.reconcileServices(job, services, rtype, spec)
if err != nil {
glog.Infof("reconcileServices error %v", err)
return err
}
*/
// TODO: Add check here, no need to update the caffe2job if the status hasn't changed since last time.
return c.updateStatusHandler(job)
}
func genLabels(id, jobKey string) map[string]string {
return map[string]string{
"group_name": api.GroupName,
"caffe2_job_key": strings.Replace(jobKey, "/", "-", -1),
"runtime_id": id,
}
}
// When a pod is added, set the defaults and enqueue the current caffe2job.
func (c *Controller) addCaffe2Job(obj interface{}) {
job := obj.(*api.Caffe2Job)
msg := fmt.Sprintf("Caffe2Job %s is created.", job.Name)
glog.Info(msg)
scheme.Scheme.Default(job)
// Leave a created condition.
err := c.updateCaffe2JobConditions(job, api.Caffe2JobCreated, caffe2JobCreatedReason, msg)
if err != nil {
glog.Errorf("Append caffe2job condition error: %v", err)
return
}
c.enqueueController(obj)
}
// When a pod is updated, enqueue the current caffe2job.
func (c *Controller) updateCaffe2Job(old, cur interface{}) {
oldCaffe2Job := old.(*api.Caffe2Job)
glog.Infof("Updating caffe2job: %s", oldCaffe2Job.Name)
c.enqueueController(cur)
}
func (c *Controller) updateCaffe2JobStatus(job *api.Caffe2Job) error {
_, err := c.caffe2JobClient.KubeflowV1alpha1().Caffe2Jobs(job.Namespace).Update(job)
return err
}
func (c *Controller) updateCaffe2JobConditions(job *api.Caffe2Job, conditionType api.Caffe2JobConditionType, reason, message string) error {
condition := newCondition(conditionType, reason, message)
setCondition(&job.Status, condition)
return nil
}
// resolveControllerRef returns the tfjob referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching tfjob
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *api.Caffe2Job {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
return nil
}
job, err := c.caffe2JobLister.Caffe2Jobs(namespace).Get(controllerRef.Name)
if err != nil {
return nil
}
if job.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return job
}
func genOwnerReference(job *api.Caffe2Job) *metav1.OwnerReference {
boolPtr := func(b bool) *bool { return &b }
controllerRef := &metav1.OwnerReference{
APIVersion: groupVersionKind.GroupVersion().String(),
Kind: groupVersionKind.Kind,
Name: job.Name,
UID: job.UID,
BlockOwnerDeletion: boolPtr(true),
Controller: boolPtr(true),
}
return controllerRef
}
// newCondition creates a new caffe2job condition.
func newCondition(conditionType api.Caffe2JobConditionType, reason, message string) api.Caffe2JobCondition {
return api.Caffe2JobCondition{
Type: conditionType,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// getCondition returns the condition with the provided type.
func getCondition(status api.Caffe2JobStatus, condType api.Caffe2JobConditionType) *api.Caffe2JobCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// setCondition updates the caffe2job to include the provided condition.
// If the condition that we are about to add already exists
// and has the same status and reason then we are not going to update.
func setCondition(status *api.Caffe2JobStatus, condition api.Caffe2JobCondition) {
currentCond := getCondition(*status, condition.Type)
// Do nothing if condition doesn't change
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
// Append the updated condition to the
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
// removeCondition removes the caffe2job condition with the provided type.
func removementCondition(status *api.Caffe2JobStatus, condType api.Caffe2JobConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of caffe2job conditions without conditions with the provided type.
func filterOutCondition(conditions []api.Caffe2JobCondition, condType api.Caffe2JobConditionType) []api.Caffe2JobCondition {
var newConditions []api.Caffe2JobCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
} | // caffe2JobClientSet is a clientset for CRD Caffe2Job.
caffe2JobClient jobclient.Interface
| random_line_split |
controller.go | // Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package controller provides a Kubernetes controller for a Caffe2 job resource.
package controller
import (
"errors"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
k8sinformers "k8s.io/client-go/informers"
clientv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
api "github.com/kubeflow/caffe2-operator/pkg/apis/caffe2/v1alpha1"
jobclient "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned"
kubeflowscheme "github.com/kubeflow/caffe2-operator/pkg/client/clientset/versioned/scheme"
informers "github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions"
"github.com/kubeflow/caffe2-operator/pkg/client/informers/externalversions/kubeflow/v1alpha1"
listers "github.com/kubeflow/caffe2-operator/pkg/client/listers/kubeflow/v1alpha1"
)
const (
controllerName = "caffe2-operator"
// labels for pods and servers.
caffe2ReplicaTypeLabel = "caffe2_replica_type"
caffe2ReplicaIndexLabel = "caffe2_replica_index"
)
var (
ErrVersionOutdated = errors.New("requested version is outdated in apiserver")
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
// key function but it should be just fine for non delete events.
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
// DefaultJobBackOff is the max backoff period, exported for the e2e test
DefaultJobBackOff = 10 * time.Second
// MaxJobBackOff is the max backoff period, exported for the e2e test
MaxJobBackOff = 360 * time.Second
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = api.SchemeGroupVersion.WithKind("Caffe2Job")
var groupVersionKind = schema.GroupVersionKind{
Group: api.GroupName,
Version: api.GroupVersion,
Kind: api.Caffe2JobResourceKind,
}
type ControllerConfiguration struct {
ReconcilerSyncLoopPeriod metav1.Duration
}
// DefaultCaffe2JobControllerConfiguration is the suggested caffe2-operator configuration for production.
var DefaultCaffe2JobControllerConfiguration ControllerConfiguration = ControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 15 * time.Second},
}
type Controller struct {
config ControllerConfiguration
// podControl is used to add or delete pods.
podControl PodControlInterface
// serviceControl is used to add or delete services.
serviceControl ServiceControlInterface
// kubeClient is a standard kubernetes clientset.
kubeClient kubernetes.Interface
// caffe2JobClientSet is a clientset for CRD Caffe2Job.
caffe2JobClient jobclient.Interface
// caffe2JobLister can list/get caffe2jobs from the shared informer's store.
caffe2JobLister listers.Caffe2JobLister
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// serviceLister can list/get services from the shared informer's store.
serviceLister corelisters.ServiceLister
podInformer clientv1.PodInformer
caffe2JobInformer v1alpha1.Caffe2JobInformer
// returns true if the caffe2job store has been synced at least once.
caffe2JobSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
podListerSynced cache.InformerSynced
// serviceListerSynced returns true if the service store has been synced at least once.
serviceListerSynced cache.InformerSynced
// WorkQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// To allow injection of syncCaffe2Job for testing.
syncHandler func(jobKey string) (bool, error)
// To allow injection of updateStatus for testing.
updateStatusHandler func(job *api.Caffe2Job) error
}
func New(kubeClient kubernetes.Interface, caffe2JobClient jobclient.Interface) (*Controller, error) {
kubeflowscheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podControl := RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
serviceControl := RealServiceControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
controller := &Controller{
podControl: podControl,
serviceControl: serviceControl,
kubeClient: kubeClient,
caffe2JobClient: caffe2JobClient,
workQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Caffe2jobs"),
recorder: recorder,
}
caffe2JobInformerFactory := informers.NewSharedInformerFactory(caffe2JobClient, time.Second*30)
podInformerFactory := k8sinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
controller.caffe2JobInformer = caffe2JobInformerFactory.Kubeflow().V1alpha1().Caffe2Jobs()
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
controller.caffe2JobInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *api.Caffe2Job:
glog.V(4).Infof("filter caffe2job name: %v", t.Name)
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addCaffe2Job,
UpdateFunc: controller.updateCaffe2Job,
DeleteFunc: controller.enqueueController,
},
})
controller.caffe2JobLister = controller.caffe2JobInformer.Lister()
controller.caffe2JobSynced = controller.caffe2JobInformer.Informer().HasSynced
// create informer for pod information
controller.podInformer = podInformerFactory.Core().V1().Pods()
controller.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch obj.(type) {
case *v1.Pod:
pod := obj.(*v1.Pod)
if _, ok := pod.Labels["caffe2_job_key"]; !ok {
return false
}
return pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addPod,
UpdateFunc: controller.updatePod,
DeleteFunc: controller.deletePod,
},
})
controller.podLister = controller.podInformer.Lister()
controller.podListerSynced = controller.podInformer.Informer().HasSynced
controller.syncHandler = controller.syncCaffe2Job
controller.updateStatusHandler = controller.updateCaffe2JobStatus
return controller, nil
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workQueue.ShutDown()
go c.podInformer.Informer().Run(stopCh)
go c.caffe2JobInformer.Informer().Run(stopCh)
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Caffe2Job controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
glog.V(4).Info("Sync caffe2jobs...")
if ok := cache.WaitForCacheSync(stopCh, c.caffe2JobSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.V(4).Info("Sync pods...")
if ok := cache.WaitForCacheSync(stopCh, c.podListerSynced); !ok {
return fmt.Errorf("failed to wait for pod caches to sync")
}
glog.Infof("Starting %v workers", threadiness)
// Launch workers to process Caffe2Job resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) | () bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget {
c.workQueue.Forget(key)
}
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true indicating that its ok to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name) == 0 {
return false, fmt.Errorf("invalid job key %q: either namespace or name is missing", key)
}
job, err := c.caffe2JobLister.Caffe2Jobs(ns).Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
glog.V(4).Infof("Job has been deleted: %v", key)
return true, nil
}
return false, err
}
glog.Infof("Caffe2Jobs: %#v", job)
var reconcileCaffe2JobsErr error
if job.DeletionTimestamp == nil {
reconcileCaffe2JobsErr = c.reconcileCaffe2Jobs(job)
}
if reconcileCaffe2JobsErr != nil {
return false, reconcileCaffe2JobsErr
}
return true, err
}
// obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item.
func (c *Controller) enqueueController(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.workQueue.AddRateLimited(key)
}
// reconcileCaffe2Jobs checks and updates replicas for each given Caffe2ReplicaSpec.
// It will requeue the caffe2job in case of an error while creating/deleting pods/services.
func (c *Controller) reconcileCaffe2Jobs(job *api.Caffe2Job) error {
glog.Infof("Reconcile Caffe2Jobs %s", job.Name)
pods, err := c.getPodsForCaffe2Job(job)
if err != nil {
glog.Infof("getPodsForCaffe2Job error %v", err)
return err
}
glog.V(4).Infof("Pods is %#v", pods)
/* TODO services
services, err := c.getServicesForCaffe2Job(job)
if err != nil {
glog.Infof("getServicesForCaffe2Job error %v", err)
return err
}
*/
// Diff current active pods/services with replicas.
spec := job.Spec.ReplicaSpecs
err = c.reconcilePods(job, pods, spec)
if err != nil {
glog.Infof("reconcilePods error %v", err)
return err
}
/*
err = c.reconcileServices(job, services, rtype, spec)
if err != nil {
glog.Infof("reconcileServices error %v", err)
return err
}
*/
// TODO: Add check here, no need to update the caffe2job if the status hasn't changed since last time.
return c.updateStatusHandler(job)
}
func genLabels(id, jobKey string) map[string]string {
return map[string]string{
"group_name": api.GroupName,
"caffe2_job_key": strings.Replace(jobKey, "/", "-", -1),
"runtime_id": id,
}
}
// When a pod is added, set the defaults and enqueue the current caffe2job.
func (c *Controller) addCaffe2Job(obj interface{}) {
job := obj.(*api.Caffe2Job)
msg := fmt.Sprintf("Caffe2Job %s is created.", job.Name)
glog.Info(msg)
scheme.Scheme.Default(job)
// Leave a created condition.
err := c.updateCaffe2JobConditions(job, api.Caffe2JobCreated, caffe2JobCreatedReason, msg)
if err != nil {
glog.Errorf("Append caffe2job condition error: %v", err)
return
}
c.enqueueController(obj)
}
// When a pod is updated, enqueue the current caffe2job.
func (c *Controller) updateCaffe2Job(old, cur interface{}) {
oldCaffe2Job := old.(*api.Caffe2Job)
glog.Infof("Updating caffe2job: %s", oldCaffe2Job.Name)
c.enqueueController(cur)
}
func (c *Controller) updateCaffe2JobStatus(job *api.Caffe2Job) error {
_, err := c.caffe2JobClient.KubeflowV1alpha1().Caffe2Jobs(job.Namespace).Update(job)
return err
}
func (c *Controller) updateCaffe2JobConditions(job *api.Caffe2Job, conditionType api.Caffe2JobConditionType, reason, message string) error {
condition := newCondition(conditionType, reason, message)
setCondition(&job.Status, condition)
return nil
}
// resolveControllerRef returns the tfjob referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching tfjob
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *api.Caffe2Job {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
return nil
}
job, err := c.caffe2JobLister.Caffe2Jobs(namespace).Get(controllerRef.Name)
if err != nil {
return nil
}
if job.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return job
}
func genOwnerReference(job *api.Caffe2Job) *metav1.OwnerReference {
boolPtr := func(b bool) *bool { return &b }
controllerRef := &metav1.OwnerReference{
APIVersion: groupVersionKind.GroupVersion().String(),
Kind: groupVersionKind.Kind,
Name: job.Name,
UID: job.UID,
BlockOwnerDeletion: boolPtr(true),
Controller: boolPtr(true),
}
return controllerRef
}
// newCondition creates a new caffe2job condition.
func newCondition(conditionType api.Caffe2JobConditionType, reason, message string) api.Caffe2JobCondition {
return api.Caffe2JobCondition{
Type: conditionType,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// getCondition returns the condition with the provided type.
func getCondition(status api.Caffe2JobStatus, condType api.Caffe2JobConditionType) *api.Caffe2JobCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// setCondition updates the caffe2job to include the provided condition.
// If the condition that we are about to add already exists
// and has the same status and reason then we are not going to update.
func setCondition(status *api.Caffe2JobStatus, condition api.Caffe2JobCondition) {
currentCond := getCondition(*status, condition.Type)
// Do nothing if condition doesn't change
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
// Append the updated condition to the
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
// removeCondition removes the caffe2job condition with the provided type.
func removementCondition(status *api.Caffe2JobStatus, condType api.Caffe2JobConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of caffe2job conditions without conditions with the provided type.
func filterOutCondition(conditions []api.Caffe2JobCondition, condType api.Caffe2JobConditionType) []api.Caffe2JobCondition {
var newConditions []api.Caffe2JobCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}
| processNextWorkItem | identifier_name |
featureFileReader.js | /*
* The MIT License (MIT)
*
* Copyright (c) 2014 Broad Institute
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import FeatureParser from "./featureParser.js"
import SegParser from "./segParser.js"
import VcfParser from "../variant/vcfParser.js"
import {BGZip, FileUtils, igvxhr, URIUtils} from "../../node_modules/igv-utils/src/index.js"
import {buildOptions, isDataURL} from "../util/igvUtils.js"
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader() {
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
}
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async | (chr, start, end) {
// insure that header has been loaded -- tabix _blockLoader is initialized as side effect
if(!this.dataURI && !this.header) {
await this.readHeader()
}
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter psuedo-features (e.g. created mates for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this.config.indexURL) {
this.index = await this.loadIndex()
return this.index
}
}
/**
* Return a Promise for the async loaded index
*/
async loadIndex() {
const indexURL = this.config.indexURL
return loadIndex(indexURL, this.config, this.genome)
}
async loadFeaturesFromDataURI() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const plain = BGZip.decodeDataURI(this.dataURI)
let dataWrapper = getDataWrapper(plain)
this.header = await this.parser.parseHeader(dataWrapper)
if (this.header instanceof String && this.header.startsWith("##gff-version 3")) {
this.format = 'gff3'
}
dataWrapper = getDataWrapper(plain)
this.features = await this.parser.parseFeatures(dataWrapper)
return this.features
}
}
}
export default FeatureFileReader
| loadFeaturesWithIndex | identifier_name |
featureFileReader.js | /*
* The MIT License (MIT)
*
* Copyright (c) 2014 Broad Institute
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import FeatureParser from "./featureParser.js"
import SegParser from "./segParser.js"
import VcfParser from "../variant/vcfParser.js"
import {BGZip, FileUtils, igvxhr, URIUtils} from "../../node_modules/igv-utils/src/index.js"
import {buildOptions, isDataURL} from "../util/igvUtils.js"
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader() {
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
}
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async loadFeaturesWithIndex(chr, start, end) {
// insure that header has been loaded -- tabix _blockLoader is initialized as side effect
if(!this.dataURI && !this.header) {
await this.readHeader()
}
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter psuedo-features (e.g. created mates for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this.config.indexURL) {
this.index = await this.loadIndex()
return this.index
}
}
/**
* Return a Promise for the async loaded index
*/
async loadIndex() {
const indexURL = this.config.indexURL
return loadIndex(indexURL, this.config, this.genome)
}
async loadFeaturesFromDataURI() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const plain = BGZip.decodeDataURI(this.dataURI)
let dataWrapper = getDataWrapper(plain)
this.header = await this.parser.parseHeader(dataWrapper)
if (this.header instanceof String && this.header.startsWith("##gff-version 3")) {
this.format = 'gff3'
}
dataWrapper = getDataWrapper(plain)
this.features = await this.parser.parseFeatures(dataWrapper)
return this.features |
export default FeatureFileReader | }
}
} | random_line_split |
featureFileReader.js | /*
* The MIT License (MIT)
*
* Copyright (c) 2014 Broad Institute
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import FeatureParser from "./featureParser.js"
import SegParser from "./segParser.js"
import VcfParser from "../variant/vcfParser.js"
import {BGZip, FileUtils, igvxhr, URIUtils} from "../../node_modules/igv-utils/src/index.js"
import {buildOptions, isDataURL} from "../util/igvUtils.js"
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader() |
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async loadFeaturesWithIndex(chr, start, end) {
// insure that header has been loaded -- tabix _blockLoader is initialized as side effect
if(!this.dataURI && !this.header) {
await this.readHeader()
}
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter psuedo-features (e.g. created mates for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this.config.indexURL) {
this.index = await this.loadIndex()
return this.index
}
}
/**
* Return a Promise for the async loaded index
*/
async loadIndex() {
const indexURL = this.config.indexURL
return loadIndex(indexURL, this.config, this.genome)
}
async loadFeaturesFromDataURI() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const plain = BGZip.decodeDataURI(this.dataURI)
let dataWrapper = getDataWrapper(plain)
this.header = await this.parser.parseHeader(dataWrapper)
if (this.header instanceof String && this.header.startsWith("##gff-version 3")) {
this.format = 'gff3'
}
dataWrapper = getDataWrapper(plain)
this.features = await this.parser.parseFeatures(dataWrapper)
return this.features
}
}
}
export default FeatureFileReader
| {
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
} | identifier_body |
featureFileReader.js | /*
* The MIT License (MIT)
*
* Copyright (c) 2014 Broad Institute
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import FeatureParser from "./featureParser.js"
import SegParser from "./segParser.js"
import VcfParser from "../variant/vcfParser.js"
import {BGZip, FileUtils, igvxhr, URIUtils} from "../../node_modules/igv-utils/src/index.js"
import {buildOptions, isDataURL} from "../util/igvUtils.js"
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader() {
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
}
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async loadFeaturesWithIndex(chr, start, end) {
// insure that header has been loaded -- tabix _blockLoader is initialized as side effect
if(!this.dataURI && !this.header) |
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter psuedo-features (e.g. created mates for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this.config.indexURL) {
this.index = await this.loadIndex()
return this.index
}
}
/**
* Return a Promise for the async loaded index
*/
async loadIndex() {
const indexURL = this.config.indexURL
return loadIndex(indexURL, this.config, this.genome)
}
async loadFeaturesFromDataURI() {
if (this.features) {
// An optimization hack for non-indexed files, features are temporarily cached when header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const plain = BGZip.decodeDataURI(this.dataURI)
let dataWrapper = getDataWrapper(plain)
this.header = await this.parser.parseHeader(dataWrapper)
if (this.header instanceof String && this.header.startsWith("##gff-version 3")) {
this.format = 'gff3'
}
dataWrapper = getDataWrapper(plain)
this.features = await this.parser.parseFeatures(dataWrapper)
return this.features
}
}
}
export default FeatureFileReader
| {
await this.readHeader()
} | conditional_block |
client.go | package metrics
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
)
// TODO Instrumentation? To get statistics?
// TODO Authorization / Authentication ?
// More detailed error
type HawkularClientError struct {
msg string
Code int
}
func (self *HawkularClientError) Error() string {
return fmt.Sprintf("Hawkular returned status code %d, error message: %s", self.Code, self.msg)
}
// Client creation and instance config
const (
base_url string = "hawkular/metrics"
timeout time.Duration = time.Duration(30 * time.Second)
)
type Parameters struct {
Tenant string // Technically optional, but requires setting Tenant() option everytime
Host string
Path string // Modifieral
}
type Client struct {
Tenant string
url *url.URL
client *http.Client
}
type HawkularClient interface {
Send(*http.Request) (*http.Response, error)
}
// Modifiers
type Modifier func(*http.Request) error
// Override function to replace the Tenant (defaults to Client default)
func Tenant(tenant string) Modifier {
return func(r *http.Request) error {
r.Header.Set("Hawkular-Tenant", tenant)
return nil
}
}
// Add payload to the request
func Data(data interface{}) Modifier {
return func(r *http.Request) error {
jsonb, err := json.Marshal(data)
if err != nil {
return err
}
b := bytes.NewBuffer(jsonb)
rc := ioutil.NopCloser(b)
r.Body = rc
// fmt.Printf("Sending: %s\n", string(jsonb))
if b != nil {
r.ContentLength = int64(b.Len())
}
return nil
}
}
func (self *Client) Url(method string, e ...Endpoint) Modifier {
// TODO Create composite URLs? Add().Add().. etc? Easier to modify on the fly..
return func(r *http.Request) error {
u := self.createUrl(e...)
r.URL = u
r.Method = method
return nil
}
}
// Filters for querying
type Filter func(r *http.Request)
func Filters(f ...Filter) Modifier {
return func(r *http.Request) error {
for _, filter := range f {
filter(r)
}
return nil // Or should filter return err?
}
}
// Add query parameters
func Param(k string, v string) Filter {
return func(r *http.Request) {
q := r.URL.Query()
q.Set(k, v)
r.URL.RawQuery = q.Encode()
}
}
func TypeFilter(t MetricType) Filter {
return Param("type", t.shortForm())
}
func TagsFilter(t map[string]string) Filter {
j := tagsEncoder(t)
return Param("tags", j)
}
// Requires HWKMETRICS-233
func IdFilter(regexp string) Filter {
return Param("id", regexp)
}
func StartTimeFilter(duration time.Duration) Filter {
return Param("start", strconv.Itoa(int(duration)))
}
func EndTimeFilter(duration time.Duration) Filter {
return Param("end", strconv.Itoa(int(duration)))
}
func BucketsFilter(buckets int) Filter {
return Param("buckets", strconv.Itoa(buckets))
}
// The SEND method..
func (self *Client) createRequest() *http.Request {
req := &http.Request{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: self.url.Host,
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Hawkular-Tenant", self.Tenant)
return req
}
func (self *Client) Send(o ...Modifier) (*http.Response, error) {
// Initialize
r := self.createRequest()
// Run all the modifiers
for _, f := range o {
err := f(r)
if err != nil {
return nil, err
}
}
return self.client.Do(r)
}
// Commands
func prepend(slice []Modifier, a ...Modifier) []Modifier {
p := make([]Modifier, 0, len(slice)+len(a))
p = append(p, a...)
p = append(p, slice...)
return p
}
// Create new Definition
func (self *Client) Create(md MetricDefinition, o ...Modifier) (bool, error) {
// Keep the order, add custom prepend
o = prepend(o, self.Url("POST", TypeEndpoint(md.Type)), Data(md))
r, err := self.Send(o...)
if err != nil {
return false, err
}
defer r.Body.Close()
if r.StatusCode > 399 {
err = self.parseErrorResponse(r)
if err, ok := err.(*HawkularClientError); ok {
if err.Code != http.StatusConflict {
return false, err
} else {
return false, nil
}
}
return false, err
}
return true, nil
}
// Fetch definitions
func (self *Client) Definitions(o ...Modifier) ([]*MetricDefinition, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(Generic)))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
md := []*MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return md, err
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Update tags
func (self *Client) UpdateTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("PUT", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()), Data(tags))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Delete given tags from the definition
func (self *Client) DeleteTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("DELETE", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint(), TagsEndpoint(tags)))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Fetch metric definition tags
func (self *Client) Tags(t MetricType, id string, o ...Modifier) (map[string]string, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and splitted by type & tenant..
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// Check for GaugeBucketpoint and so on for the rest.. uh
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == "" |
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return nil, err
}
}
return metrics, nil
}
// HTTP Helper functions
func cleanId(id string) string {
return url.QueryEscape(id)
}
// Override default http.NewRequest to avoid url.Parse which has a bug (removes valid %2F)
func (self *Client) newRequest(url *url.URL, method string, body io.Reader) (*http.Request, error) {
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
req := &http.Request{
Method: method,
URL: url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Body: rc,
Host: url.Host,
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
case *bytes.Reader:
req.ContentLength = int64(v.Len())
case *strings.Reader:
req.ContentLength = int64(v.Len())
}
}
return req, nil
}
// Helper function that transforms struct to json and fetches the correct tenant information
// TODO: Try the decorator pattern to replace all these simple functions?
func (self *Client) process(url *url.URL, method string, data interface{}) ([]byte, error) {
jsonb, err := json.Marshal(&data)
if err != nil {
return nil, err
}
return self.send(url, method, jsonb)
}
func (self *Client) send(url *url.URL, method string, json []byte) ([]byte, error) {
// Have to replicate http.NewRequest here to avoid calling of url.Parse,
// which has a bug when it comes to encoded url
req, _ := self.newRequest(url, method, bytes.NewBuffer(json))
req.Header.Add("Content-Type", "application/json")
// if len(tenant) > 0 {
// req.Header.Add("Hawkular-Tenant", tenant)
// } else {
req.Header.Add("Hawkular-Tenant", self.Tenant)
// }
// fmt.Printf("curl -X %s -H 'Hawkular-Tenant: %s' %s\n", req.Method, req.Header.Get("Hawkular-Tenant"), req.URL)
resp, err := self.client.Do(req)
// fmt.Printf("%s\n", resp.Header.Get("Content-Length"))
// fmt.Printf("%d\n", resp.StatusCode)
if err != nil {
return nil, err
}
// fmt.Printf("Received bytes: %d\n", resp.ContentLength)
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(resp.Body)
return b, err
} else if resp.StatusCode > 399 {
return nil, self.parseErrorResponse(resp)
} else {
return nil, nil // Nothing to answer..
}
}
func (self *Client) parseErrorResponse(resp *http.Response) error {
// Parse error messages here correctly..
reply, err := ioutil.ReadAll(resp.Body)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be read: %s", err.Error()),
}
}
details := &HawkularError{}
err = json.Unmarshal(reply, details)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be parsed: %s", err.Error()),
}
}
return &HawkularClientError{Code: resp.StatusCode,
msg: details.ErrorMsg,
}
}
// URL functions (...)
type Endpoint func(u *url.URL)
func (self *Client) createUrl(e ...Endpoint) *url.URL {
mu := *self.url
for _, f := range e {
f(&mu)
}
return &mu
}
func TypeEndpoint(t MetricType) Endpoint {
return func(u *url.URL) {
addToUrl(u, t.String())
}
}
func SingleMetricEndpoint(id string) Endpoint {
return func(u *url.URL) {
addToUrl(u, url.QueryEscape(id))
}
}
func TagEndpoint() Endpoint {
return func(u *url.URL) {
addToUrl(u, "tags")
}
}
func TagsEndpoint(tags map[string]string) Endpoint {
return func(u *url.URL) {
addToUrl(u, tagsEncoder(tags))
}
}
func DataEndpoint() Endpoint {
return func(u *url.URL) {
addToUrl(u, "data")
}
}
func (self *Client) metricsUrl(metricType MetricType) *url.URL {
mu := *self.url
addToUrl(&mu, metricType.String())
return &mu
}
func (self *Client) singleMetricsUrl(metricType MetricType, id string) *url.URL {
mu := self.metricsUrl(metricType)
addToUrl(mu, id)
return mu
}
func (self *Client) tagsUrl(mt MetricType, id string) *url.URL {
mu := self.singleMetricsUrl(mt, id)
addToUrl(mu, "tags")
return mu
}
func (self *Client) dataUrl(url *url.URL) *url.URL {
addToUrl(url, "data")
return url
}
func addToUrl(u *url.URL, s string) *url.URL {
u.Opaque = fmt.Sprintf("%s/%s", u.Opaque, s)
return u
}
func tagsEncoder(t map[string]string) string {
tags := make([]string, 0, len(t))
for k, v := range t {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
j := strings.Join(tags, ",")
return j
}
func (self *Client) paramUrl(u *url.URL, options map[string]string) *url.URL {
q := u.Query()
for k, v := range options {
q.Set(k, v)
}
u.RawQuery = q.Encode()
return u
}
| {
p.Path = base_url
} | conditional_block |
client.go | package metrics
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
)
// TODO Instrumentation? To get statistics?
// TODO Authorization / Authentication ?
// More detailed error
type HawkularClientError struct {
msg string
Code int
}
func (self *HawkularClientError) Error() string {
return fmt.Sprintf("Hawkular returned status code %d, error message: %s", self.Code, self.msg)
}
// Client creation and instance config
const (
base_url string = "hawkular/metrics"
timeout time.Duration = time.Duration(30 * time.Second)
)
type Parameters struct {
Tenant string // Technically optional, but requires setting Tenant() option everytime
Host string
Path string // Modifieral
}
type Client struct {
Tenant string
url *url.URL
client *http.Client
}
type HawkularClient interface {
Send(*http.Request) (*http.Response, error)
}
// Modifiers
type Modifier func(*http.Request) error
// Override function to replace the Tenant (defaults to Client default)
func Tenant(tenant string) Modifier {
return func(r *http.Request) error {
r.Header.Set("Hawkular-Tenant", tenant)
return nil
}
}
// Add payload to the request
func Data(data interface{}) Modifier {
return func(r *http.Request) error {
jsonb, err := json.Marshal(data)
if err != nil {
return err
}
b := bytes.NewBuffer(jsonb)
rc := ioutil.NopCloser(b)
r.Body = rc
// fmt.Printf("Sending: %s\n", string(jsonb))
if b != nil {
r.ContentLength = int64(b.Len())
}
return nil
}
}
func (self *Client) Url(method string, e ...Endpoint) Modifier {
// TODO Create composite URLs? Add().Add().. etc? Easier to modify on the fly..
return func(r *http.Request) error {
u := self.createUrl(e...)
r.URL = u
r.Method = method
return nil
}
}
// Filters for querying
type Filter func(r *http.Request)
func Filters(f ...Filter) Modifier {
return func(r *http.Request) error {
for _, filter := range f {
filter(r)
}
return nil // Or should filter return err?
}
}
// Add query parameters
func Param(k string, v string) Filter {
return func(r *http.Request) {
q := r.URL.Query()
q.Set(k, v)
r.URL.RawQuery = q.Encode()
}
}
func TypeFilter(t MetricType) Filter {
return Param("type", t.shortForm())
}
func TagsFilter(t map[string]string) Filter {
j := tagsEncoder(t)
return Param("tags", j)
}
// Requires HWKMETRICS-233
func IdFilter(regexp string) Filter {
return Param("id", regexp)
}
func StartTimeFilter(duration time.Duration) Filter {
return Param("start", strconv.Itoa(int(duration)))
}
func EndTimeFilter(duration time.Duration) Filter {
return Param("end", strconv.Itoa(int(duration)))
}
func BucketsFilter(buckets int) Filter {
return Param("buckets", strconv.Itoa(buckets))
}
// The SEND method..
func (self *Client) createRequest() *http.Request {
req := &http.Request{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: self.url.Host,
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Hawkular-Tenant", self.Tenant)
return req
}
func (self *Client) Send(o ...Modifier) (*http.Response, error) {
// Initialize
r := self.createRequest()
// Run all the modifiers
for _, f := range o {
err := f(r)
if err != nil {
return nil, err
}
}
return self.client.Do(r)
}
// Commands
func prepend(slice []Modifier, a ...Modifier) []Modifier {
p := make([]Modifier, 0, len(slice)+len(a))
p = append(p, a...)
p = append(p, slice...)
return p
}
// Create new Definition
func (self *Client) Create(md MetricDefinition, o ...Modifier) (bool, error) {
// Keep the order, add custom prepend
o = prepend(o, self.Url("POST", TypeEndpoint(md.Type)), Data(md))
r, err := self.Send(o...)
if err != nil {
return false, err
}
defer r.Body.Close()
if r.StatusCode > 399 {
err = self.parseErrorResponse(r)
if err, ok := err.(*HawkularClientError); ok {
if err.Code != http.StatusConflict {
return false, err
} else {
return false, nil
}
}
return false, err
}
return true, nil
}
// Fetch definitions
func (self *Client) Definitions(o ...Modifier) ([]*MetricDefinition, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(Generic)))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK { | md := []*MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return md, err
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Update tags
func (self *Client) UpdateTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("PUT", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()), Data(tags))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Delete given tags from the definition
func (self *Client) DeleteTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("DELETE", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint(), TagsEndpoint(tags)))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Fetch metric definition tags
func (self *Client) Tags(t MetricType, id string, o ...Modifier) (map[string]string, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and splitted by type & tenant..
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// Check for GaugeBucketpoint and so on for the rest.. uh
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == "" {
p.Path = base_url
}
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return nil, err
}
}
return metrics, nil
}
// HTTP Helper functions
func cleanId(id string) string {
return url.QueryEscape(id)
}
// Override default http.NewRequest to avoid url.Parse which has a bug (removes valid %2F)
func (self *Client) newRequest(url *url.URL, method string, body io.Reader) (*http.Request, error) {
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
req := &http.Request{
Method: method,
URL: url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Body: rc,
Host: url.Host,
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
case *bytes.Reader:
req.ContentLength = int64(v.Len())
case *strings.Reader:
req.ContentLength = int64(v.Len())
}
}
return req, nil
}
// Helper function that transforms struct to json and fetches the correct tenant information
// TODO: Try the decorator pattern to replace all these simple functions?
func (self *Client) process(url *url.URL, method string, data interface{}) ([]byte, error) {
jsonb, err := json.Marshal(&data)
if err != nil {
return nil, err
}
return self.send(url, method, jsonb)
}
func (self *Client) send(url *url.URL, method string, json []byte) ([]byte, error) {
// Have to replicate http.NewRequest here to avoid calling of url.Parse,
// which has a bug when it comes to encoded url
req, _ := self.newRequest(url, method, bytes.NewBuffer(json))
req.Header.Add("Content-Type", "application/json")
// if len(tenant) > 0 {
// req.Header.Add("Hawkular-Tenant", tenant)
// } else {
req.Header.Add("Hawkular-Tenant", self.Tenant)
// }
// fmt.Printf("curl -X %s -H 'Hawkular-Tenant: %s' %s\n", req.Method, req.Header.Get("Hawkular-Tenant"), req.URL)
resp, err := self.client.Do(req)
// fmt.Printf("%s\n", resp.Header.Get("Content-Length"))
// fmt.Printf("%d\n", resp.StatusCode)
if err != nil {
return nil, err
}
// fmt.Printf("Received bytes: %d\n", resp.ContentLength)
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(resp.Body)
return b, err
} else if resp.StatusCode > 399 {
return nil, self.parseErrorResponse(resp)
} else {
return nil, nil // Nothing to answer..
}
}
func (self *Client) parseErrorResponse(resp *http.Response) error {
// Parse error messages here correctly..
reply, err := ioutil.ReadAll(resp.Body)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be read: %s", err.Error()),
}
}
details := &HawkularError{}
err = json.Unmarshal(reply, details)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be parsed: %s", err.Error()),
}
}
return &HawkularClientError{Code: resp.StatusCode,
msg: details.ErrorMsg,
}
}
// URL functions (...)
type Endpoint func(u *url.URL)
func (self *Client) createUrl(e ...Endpoint) *url.URL {
mu := *self.url
for _, f := range e {
f(&mu)
}
return &mu
}
func TypeEndpoint(t MetricType) Endpoint {
return func(u *url.URL) {
addToUrl(u, t.String())
}
}
func SingleMetricEndpoint(id string) Endpoint {
return func(u *url.URL) {
addToUrl(u, url.QueryEscape(id))
}
}
func TagEndpoint() Endpoint {
return func(u *url.URL) {
addToUrl(u, "tags")
}
}
func TagsEndpoint(tags map[string]string) Endpoint {
return func(u *url.URL) {
addToUrl(u, tagsEncoder(tags))
}
}
func DataEndpoint() Endpoint {
return func(u *url.URL) {
addToUrl(u, "data")
}
}
func (self *Client) metricsUrl(metricType MetricType) *url.URL {
mu := *self.url
addToUrl(&mu, metricType.String())
return &mu
}
func (self *Client) singleMetricsUrl(metricType MetricType, id string) *url.URL {
mu := self.metricsUrl(metricType)
addToUrl(mu, id)
return mu
}
func (self *Client) tagsUrl(mt MetricType, id string) *url.URL {
mu := self.singleMetricsUrl(mt, id)
addToUrl(mu, "tags")
return mu
}
func (self *Client) dataUrl(url *url.URL) *url.URL {
addToUrl(url, "data")
return url
}
func addToUrl(u *url.URL, s string) *url.URL {
u.Opaque = fmt.Sprintf("%s/%s", u.Opaque, s)
return u
}
func tagsEncoder(t map[string]string) string {
tags := make([]string, 0, len(t))
for k, v := range t {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
j := strings.Join(tags, ",")
return j
}
func (self *Client) paramUrl(u *url.URL, options map[string]string) *url.URL {
q := u.Query()
for k, v := range options {
q.Set(k, v)
}
u.RawQuery = q.Encode()
return u
} | b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
} | random_line_split |
client.go | package metrics
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
)
// TODO Instrumentation? To get statistics?
// TODO Authorization / Authentication ?
// More detailed error
type HawkularClientError struct {
msg string
Code int
}
func (self *HawkularClientError) Error() string {
return fmt.Sprintf("Hawkular returned status code %d, error message: %s", self.Code, self.msg)
}
// Client creation and instance config
const (
base_url string = "hawkular/metrics"
timeout time.Duration = time.Duration(30 * time.Second)
)
type Parameters struct {
Tenant string // Technically optional, but requires setting Tenant() option everytime
Host string
Path string // Modifieral
}
type Client struct {
Tenant string
url *url.URL
client *http.Client
}
type HawkularClient interface {
Send(*http.Request) (*http.Response, error)
}
// Modifiers
type Modifier func(*http.Request) error
// Override function to replace the Tenant (defaults to Client default)
func Tenant(tenant string) Modifier {
return func(r *http.Request) error {
r.Header.Set("Hawkular-Tenant", tenant)
return nil
}
}
// Add payload to the request
func Data(data interface{}) Modifier {
return func(r *http.Request) error {
jsonb, err := json.Marshal(data)
if err != nil {
return err
}
b := bytes.NewBuffer(jsonb)
rc := ioutil.NopCloser(b)
r.Body = rc
// fmt.Printf("Sending: %s\n", string(jsonb))
if b != nil {
r.ContentLength = int64(b.Len())
}
return nil
}
}
func (self *Client) Url(method string, e ...Endpoint) Modifier {
// TODO Create composite URLs? Add().Add().. etc? Easier to modify on the fly..
return func(r *http.Request) error {
u := self.createUrl(e...)
r.URL = u
r.Method = method
return nil
}
}
// Filters for querying
type Filter func(r *http.Request)
func Filters(f ...Filter) Modifier {
return func(r *http.Request) error {
for _, filter := range f {
filter(r)
}
return nil // Or should filter return err?
}
}
// Add query parameters
func Param(k string, v string) Filter {
return func(r *http.Request) {
q := r.URL.Query()
q.Set(k, v)
r.URL.RawQuery = q.Encode()
}
}
func TypeFilter(t MetricType) Filter {
return Param("type", t.shortForm())
}
func TagsFilter(t map[string]string) Filter {
j := tagsEncoder(t)
return Param("tags", j)
}
// Requires HWKMETRICS-233
func IdFilter(regexp string) Filter {
return Param("id", regexp)
}
func StartTimeFilter(duration time.Duration) Filter {
return Param("start", strconv.Itoa(int(duration)))
}
func EndTimeFilter(duration time.Duration) Filter {
return Param("end", strconv.Itoa(int(duration)))
}
func BucketsFilter(buckets int) Filter {
return Param("buckets", strconv.Itoa(buckets))
}
// The SEND method..
func (self *Client) createRequest() *http.Request {
req := &http.Request{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: self.url.Host,
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Hawkular-Tenant", self.Tenant)
return req
}
func (self *Client) Send(o ...Modifier) (*http.Response, error) {
// Initialize
r := self.createRequest()
// Run all the modifiers
for _, f := range o {
err := f(r)
if err != nil {
return nil, err
}
}
return self.client.Do(r)
}
// Commands
func prepend(slice []Modifier, a ...Modifier) []Modifier {
p := make([]Modifier, 0, len(slice)+len(a))
p = append(p, a...)
p = append(p, slice...)
return p
}
// Create new Definition
func (self *Client) Create(md MetricDefinition, o ...Modifier) (bool, error) {
// Keep the order, add custom prepend
o = prepend(o, self.Url("POST", TypeEndpoint(md.Type)), Data(md))
r, err := self.Send(o...)
if err != nil {
return false, err
}
defer r.Body.Close()
if r.StatusCode > 399 {
err = self.parseErrorResponse(r)
if err, ok := err.(*HawkularClientError); ok {
if err.Code != http.StatusConflict {
return false, err
} else {
return false, nil
}
}
return false, err
}
return true, nil
}
// Fetch definitions
func (self *Client) Definitions(o ...Modifier) ([]*MetricDefinition, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(Generic)))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
md := []*MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return md, err
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Update tags
func (self *Client) UpdateTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("PUT", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()), Data(tags))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Delete given tags from the definition
func (self *Client) DeleteTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("DELETE", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint(), TagsEndpoint(tags)))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Fetch metric definition tags
func (self *Client) Tags(t MetricType, id string, o ...Modifier) (map[string]string, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and splitted by type & tenant..
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// Check for GaugeBucketpoint and so on for the rest.. uh
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == "" {
p.Path = base_url
}
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return nil, err
}
}
return metrics, nil
}
// HTTP Helper functions
func cleanId(id string) string {
return url.QueryEscape(id)
}
// Override default http.NewRequest to avoid url.Parse which has a bug (removes valid %2F)
func (self *Client) newRequest(url *url.URL, method string, body io.Reader) (*http.Request, error) {
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
req := &http.Request{
Method: method,
URL: url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Body: rc,
Host: url.Host,
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
case *bytes.Reader:
req.ContentLength = int64(v.Len())
case *strings.Reader:
req.ContentLength = int64(v.Len())
}
}
return req, nil
}
// Helper function that transforms struct to json and fetches the correct tenant information
// TODO: Try the decorator pattern to replace all these simple functions?
func (self *Client) process(url *url.URL, method string, data interface{}) ([]byte, error) {
jsonb, err := json.Marshal(&data)
if err != nil {
return nil, err
}
return self.send(url, method, jsonb)
}
func (self *Client) send(url *url.URL, method string, json []byte) ([]byte, error) {
// Have to replicate http.NewRequest here to avoid calling of url.Parse,
// which has a bug when it comes to encoded url
req, _ := self.newRequest(url, method, bytes.NewBuffer(json))
req.Header.Add("Content-Type", "application/json")
// if len(tenant) > 0 {
// req.Header.Add("Hawkular-Tenant", tenant)
// } else {
req.Header.Add("Hawkular-Tenant", self.Tenant)
// }
// fmt.Printf("curl -X %s -H 'Hawkular-Tenant: %s' %s\n", req.Method, req.Header.Get("Hawkular-Tenant"), req.URL)
resp, err := self.client.Do(req)
// fmt.Printf("%s\n", resp.Header.Get("Content-Length"))
// fmt.Printf("%d\n", resp.StatusCode)
if err != nil {
return nil, err
}
// fmt.Printf("Received bytes: %d\n", resp.ContentLength)
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(resp.Body)
return b, err
} else if resp.StatusCode > 399 {
return nil, self.parseErrorResponse(resp)
} else {
return nil, nil // Nothing to answer..
}
}
func (self *Client) parseErrorResponse(resp *http.Response) error {
// Parse error messages here correctly..
reply, err := ioutil.ReadAll(resp.Body)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be read: %s", err.Error()),
}
}
details := &HawkularError{}
err = json.Unmarshal(reply, details)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be parsed: %s", err.Error()),
}
}
return &HawkularClientError{Code: resp.StatusCode,
msg: details.ErrorMsg,
}
}
// URL functions (...)
type Endpoint func(u *url.URL)
func (self *Client) createUrl(e ...Endpoint) *url.URL {
mu := *self.url
for _, f := range e {
f(&mu)
}
return &mu
}
func TypeEndpoint(t MetricType) Endpoint {
return func(u *url.URL) {
addToUrl(u, t.String())
}
}
func SingleMetricEndpoint(id string) Endpoint {
return func(u *url.URL) {
addToUrl(u, url.QueryEscape(id))
}
}
func TagEndpoint() Endpoint |
func TagsEndpoint(tags map[string]string) Endpoint {
return func(u *url.URL) {
addToUrl(u, tagsEncoder(tags))
}
}
func DataEndpoint() Endpoint {
return func(u *url.URL) {
addToUrl(u, "data")
}
}
func (self *Client) metricsUrl(metricType MetricType) *url.URL {
mu := *self.url
addToUrl(&mu, metricType.String())
return &mu
}
func (self *Client) singleMetricsUrl(metricType MetricType, id string) *url.URL {
mu := self.metricsUrl(metricType)
addToUrl(mu, id)
return mu
}
func (self *Client) tagsUrl(mt MetricType, id string) *url.URL {
mu := self.singleMetricsUrl(mt, id)
addToUrl(mu, "tags")
return mu
}
func (self *Client) dataUrl(url *url.URL) *url.URL {
addToUrl(url, "data")
return url
}
func addToUrl(u *url.URL, s string) *url.URL {
u.Opaque = fmt.Sprintf("%s/%s", u.Opaque, s)
return u
}
func tagsEncoder(t map[string]string) string {
tags := make([]string, 0, len(t))
for k, v := range t {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
j := strings.Join(tags, ",")
return j
}
func (self *Client) paramUrl(u *url.URL, options map[string]string) *url.URL {
q := u.Query()
for k, v := range options {
q.Set(k, v)
}
u.RawQuery = q.Encode()
return u
}
| {
return func(u *url.URL) {
addToUrl(u, "tags")
}
} | identifier_body |
client.go | package metrics
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
)
// TODO Instrumentation? To get statistics?
// TODO Authorization / Authentication ?
// More detailed error
type HawkularClientError struct {
msg string
Code int
}
func (self *HawkularClientError) Error() string {
return fmt.Sprintf("Hawkular returned status code %d, error message: %s", self.Code, self.msg)
}
// Client creation and instance config
const (
base_url string = "hawkular/metrics"
timeout time.Duration = time.Duration(30 * time.Second)
)
type Parameters struct {
Tenant string // Technically optional, but requires setting Tenant() option everytime
Host string
Path string // Modifieral
}
type Client struct {
Tenant string
url *url.URL
client *http.Client
}
type HawkularClient interface {
Send(*http.Request) (*http.Response, error)
}
// Modifiers
type Modifier func(*http.Request) error
// Override function to replace the Tenant (defaults to Client default)
func Tenant(tenant string) Modifier {
return func(r *http.Request) error {
r.Header.Set("Hawkular-Tenant", tenant)
return nil
}
}
// Add payload to the request
func Data(data interface{}) Modifier {
return func(r *http.Request) error {
jsonb, err := json.Marshal(data)
if err != nil {
return err
}
b := bytes.NewBuffer(jsonb)
rc := ioutil.NopCloser(b)
r.Body = rc
// fmt.Printf("Sending: %s\n", string(jsonb))
if b != nil {
r.ContentLength = int64(b.Len())
}
return nil
}
}
func (self *Client) Url(method string, e ...Endpoint) Modifier {
// TODO Create composite URLs? Add().Add().. etc? Easier to modify on the fly..
return func(r *http.Request) error {
u := self.createUrl(e...)
r.URL = u
r.Method = method
return nil
}
}
// Filters for querying
type Filter func(r *http.Request)
func Filters(f ...Filter) Modifier {
return func(r *http.Request) error {
for _, filter := range f {
filter(r)
}
return nil // Or should filter return err?
}
}
// Add query parameters
func Param(k string, v string) Filter {
return func(r *http.Request) {
q := r.URL.Query()
q.Set(k, v)
r.URL.RawQuery = q.Encode()
}
}
func TypeFilter(t MetricType) Filter {
return Param("type", t.shortForm())
}
func TagsFilter(t map[string]string) Filter {
j := tagsEncoder(t)
return Param("tags", j)
}
// Requires HWKMETRICS-233
func IdFilter(regexp string) Filter {
return Param("id", regexp)
}
func StartTimeFilter(duration time.Duration) Filter {
return Param("start", strconv.Itoa(int(duration)))
}
func EndTimeFilter(duration time.Duration) Filter {
return Param("end", strconv.Itoa(int(duration)))
}
func BucketsFilter(buckets int) Filter {
return Param("buckets", strconv.Itoa(buckets))
}
// The SEND method..
func (self *Client) createRequest() *http.Request {
req := &http.Request{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: self.url.Host,
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Hawkular-Tenant", self.Tenant)
return req
}
func (self *Client) Send(o ...Modifier) (*http.Response, error) {
// Initialize
r := self.createRequest()
// Run all the modifiers
for _, f := range o {
err := f(r)
if err != nil {
return nil, err
}
}
return self.client.Do(r)
}
// Commands
func prepend(slice []Modifier, a ...Modifier) []Modifier {
p := make([]Modifier, 0, len(slice)+len(a))
p = append(p, a...)
p = append(p, slice...)
return p
}
// Create new Definition
func (self *Client) Create(md MetricDefinition, o ...Modifier) (bool, error) {
// Keep the order, add custom prepend
o = prepend(o, self.Url("POST", TypeEndpoint(md.Type)), Data(md))
r, err := self.Send(o...)
if err != nil {
return false, err
}
defer r.Body.Close()
if r.StatusCode > 399 {
err = self.parseErrorResponse(r)
if err, ok := err.(*HawkularClientError); ok {
if err.Code != http.StatusConflict {
return false, err
} else {
return false, nil
}
}
return false, err
}
return true, nil
}
// Fetch definitions
func (self *Client) Definitions(o ...Modifier) ([]*MetricDefinition, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(Generic)))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
md := []*MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return md, err
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Update tags
func (self *Client) UpdateTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("PUT", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()), Data(tags))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Delete given tags from the definition
func (self *Client) DeleteTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("DELETE", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint(), TagsEndpoint(tags)))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Fetch metric definition tags
func (self *Client) Tags(t MetricType, id string, o ...Modifier) (map[string]string, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and splitted by type & tenant..
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// Check for GaugeBucketpoint and so on for the rest.. uh
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == "" {
p.Path = base_url
}
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return nil, err
}
}
return metrics, nil
}
// HTTP Helper functions
func cleanId(id string) string {
return url.QueryEscape(id)
}
// Override default http.NewRequest to avoid url.Parse which has a bug (removes valid %2F)
func (self *Client) newRequest(url *url.URL, method string, body io.Reader) (*http.Request, error) {
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
req := &http.Request{
Method: method,
URL: url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Body: rc,
Host: url.Host,
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
case *bytes.Reader:
req.ContentLength = int64(v.Len())
case *strings.Reader:
req.ContentLength = int64(v.Len())
}
}
return req, nil
}
// Helper function that transforms struct to json and fetches the correct tenant information
// TODO: Try the decorator pattern to replace all these simple functions?
func (self *Client) | (url *url.URL, method string, data interface{}) ([]byte, error) {
jsonb, err := json.Marshal(&data)
if err != nil {
return nil, err
}
return self.send(url, method, jsonb)
}
func (self *Client) send(url *url.URL, method string, json []byte) ([]byte, error) {
// Have to replicate http.NewRequest here to avoid calling of url.Parse,
// which has a bug when it comes to encoded url
req, _ := self.newRequest(url, method, bytes.NewBuffer(json))
req.Header.Add("Content-Type", "application/json")
// if len(tenant) > 0 {
// req.Header.Add("Hawkular-Tenant", tenant)
// } else {
req.Header.Add("Hawkular-Tenant", self.Tenant)
// }
// fmt.Printf("curl -X %s -H 'Hawkular-Tenant: %s' %s\n", req.Method, req.Header.Get("Hawkular-Tenant"), req.URL)
resp, err := self.client.Do(req)
// fmt.Printf("%s\n", resp.Header.Get("Content-Length"))
// fmt.Printf("%d\n", resp.StatusCode)
if err != nil {
return nil, err
}
// fmt.Printf("Received bytes: %d\n", resp.ContentLength)
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(resp.Body)
return b, err
} else if resp.StatusCode > 399 {
return nil, self.parseErrorResponse(resp)
} else {
return nil, nil // Nothing to answer..
}
}
func (self *Client) parseErrorResponse(resp *http.Response) error {
// Parse error messages here correctly..
reply, err := ioutil.ReadAll(resp.Body)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be read: %s", err.Error()),
}
}
details := &HawkularError{}
err = json.Unmarshal(reply, details)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be parsed: %s", err.Error()),
}
}
return &HawkularClientError{Code: resp.StatusCode,
msg: details.ErrorMsg,
}
}
// URL functions (...)
type Endpoint func(u *url.URL)
func (self *Client) createUrl(e ...Endpoint) *url.URL {
mu := *self.url
for _, f := range e {
f(&mu)
}
return &mu
}
func TypeEndpoint(t MetricType) Endpoint {
return func(u *url.URL) {
addToUrl(u, t.String())
}
}
func SingleMetricEndpoint(id string) Endpoint {
return func(u *url.URL) {
addToUrl(u, url.QueryEscape(id))
}
}
func TagEndpoint() Endpoint {
return func(u *url.URL) {
addToUrl(u, "tags")
}
}
func TagsEndpoint(tags map[string]string) Endpoint {
return func(u *url.URL) {
addToUrl(u, tagsEncoder(tags))
}
}
func DataEndpoint() Endpoint {
return func(u *url.URL) {
addToUrl(u, "data")
}
}
func (self *Client) metricsUrl(metricType MetricType) *url.URL {
mu := *self.url
addToUrl(&mu, metricType.String())
return &mu
}
func (self *Client) singleMetricsUrl(metricType MetricType, id string) *url.URL {
mu := self.metricsUrl(metricType)
addToUrl(mu, id)
return mu
}
func (self *Client) tagsUrl(mt MetricType, id string) *url.URL {
mu := self.singleMetricsUrl(mt, id)
addToUrl(mu, "tags")
return mu
}
func (self *Client) dataUrl(url *url.URL) *url.URL {
addToUrl(url, "data")
return url
}
func addToUrl(u *url.URL, s string) *url.URL {
u.Opaque = fmt.Sprintf("%s/%s", u.Opaque, s)
return u
}
func tagsEncoder(t map[string]string) string {
tags := make([]string, 0, len(t))
for k, v := range t {
tags = append(tags, fmt.Sprintf("%s:%s", k, v))
}
j := strings.Join(tags, ",")
return j
}
func (self *Client) paramUrl(u *url.URL, options map[string]string) *url.URL {
q := u.Query()
for k, v := range options {
q.Set(k, v)
}
u.RawQuery = q.Encode()
return u
}
| process | identifier_name |
ifd.rs | //! Function for reading TIFF tags
use std::io::{self, Read, Seek};
use std::collections::{HashMap};
use super::stream::{ByteOrder, SmartReader, EndianReader};
use self::Value::{Unsigned, List};
macro_rules! tags {
{$(
$tag:ident
$val:expr;
)*} => {
/// TIFF tag
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum Tag {
$($tag,)*
Unknown(u16)
}
impl Tag {
pub fn from_u16(n: u16) -> Tag {
$(if n == $val { Tag::$tag } else)* {
Tag::Unknown(n)
}
}
}
}
}
// taken from https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf Appendix A
// TagName Value; // in HEx TagTYPE Number of Values
tags! {
NewSubfileType 254; // FE LONG 1
SubfileType 255; // FF SHORT 1
ImageWidth 256; // 100 SHORT or LONG 1
ImageLength 257; // 101 SHORT or LONG 1
BitsPerSample 258; // 102 SHORT SamplesPerPixel
Compression 259; // 103 SHORT 1
PhotometricInterpretation 262; // 106 SHORT
Threshholding 263; // 107 SHORT 1
CellWidth 264; // 108 SHORT 1
CellLength 265; // 109 SHORT 1
FillOrder 266; // 10A SHORT 1
DocumentName 269; // 10D ASCII
ImageDescription 270; // 10E ASCII
Make 271; // 10F ASCII
Model 272; // 110 ASCII
StripOffsets 273; // 111 SHORT or LONG StripsPerImage
Orientation 274; // 112 SHORT 1
SamplesPerPixel 277; // 115 SHORT 1
RowsPerStrip 278; // 116 SHORT or LONG 1
StripByteCounts 279; // 117 LONG or SHORT StripsPerImage
MinSampleValue 280; // 118 SHORT SamplesPerPixel
MaxSampleValue 281; // 119 SHORT SamplesPerPixel
XResolution 282; // 11A RATIONAL 1
YResolution 283; // 11B RATIONAL 1
PlanarConfiguration 284; // 11C SHORT 1
PageName 285; // 11D ASCII
XPosition 286; // 11E RATIONAL
YPosition 287; // 11F RATIONAL
FreeOffsets 288; // 120 LONG
FreeByteCounts 289; // 121 LONG
GrayResponseUnit 290; // 122 SHORT
GrayResponseCurve 291; // 123 SHORT 2**BitsPerSample
T4Options 292; // 124 LONG 1
T6Options 293; // 125 LONG 1
ResolutionUnit 296; // 128 SHORT 1
PageNumber 297; // 129 SHORT 2
TransferFunction 301; // 12D SHORT
Software 305; // 131 ASCII
DateTime 306; // 132 ASCII 20
Artist 315; // 13B ASCII
HostComputer 316; // 13C ASCII
Predictor 317; // 13D SHORT 1
WhitePoint 318; // 13E RATIONAL 2
PrimaryChromaticities 319; // 13F RATIONAL 6
ColorMap 320; // 140 SHORT 3 * (2**BitsPerSample)
HalftoneHints 321; // 141 SHORT 2
TileWidth 322; // 142 SHORT or LONG 1
TileLength 323; // 143 SHORT or LONG 1
TileOffsets 324; // 144 LONG TilesPerImage
TileByteCounts 325; // 145 SHORT or LONG TilesPerImage
InkSet 332; // 14C SHORT 1
InkNames 333; // 14D ASCII t
NumberOfInks 334; // 14E SHORT 1
DotRange 336; // 150 BYTE or SHORT 2, or 2*
TargetPrinter 337; // 151 ASCII any
ExtraSamples 338; // 152 BYTE number of extra compo
SampleFormat 339; // 153 SHORT SamplesPerPixel
SMinSampleValue 340; // 154 Any SamplesPerPixel
SMaxSampleValue 341; // 155 Any SamplesPerPixel
TransferRange 342; // 156 SHORT 6
JPEGProc 512; // 200 SHORT 1
JPEGInterchangeFormat 513; // 201 LONG 1
JPEGInterchangeFormatLngth 514; // 202 LONG 1
JPEGRestartInterval 515; // 203 SHORT 1
JPEGLosslessPredictors 517; // 205 SHORT SamplesPerPixel
JPEGPointTransforms 518; // 206 SHORT SamplesPerPixel
JPEGQTables 519; // 207 LONG SamplesPerPixel
JPEGDCTables 520; // 208 LONG SamplesPerPixel
JPEGACTables 521; // 209 LONG SamplesPerPixel
YCbCrCoefficients 529; // 211 RATIONAL 3
YCbCrSubSampling 530; // 212 SHORT 2
YCbCrPositioning 531; // 213 SHORT 1
ReferenceBlackWhite 532; // 214 LONG 2*SamplesPerPixel
Copyright 33432; // 8298 ASCII Any
}
// Note: These tags appear in the order they are mentioned in the TIFF reference
// https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
// tags!{
// // Baseline tags:
// Artist 315; // TODO add support
// // grayscale images PhotometricInterpretation 1 or 3
// BitsPerSample 258;
// CellLength 265; // TODO add support
// CellWidth 264; // TODO add support
// // palette-color images (PhotometricInterpretation 3)
// ColorMap 320; // TODO add support
// Compression 259; // TODO add support for 2 and 32773
// Copyright 33432; // TODO add support
// DateTime 306; // TODO add support
// ExtraSamples 338; // TODO add support
// FillOrder 266; // TODO add support
// FreeByteCounts 289; // TODO add support
// FreeOffsets 288; // TODO add support
// GrayResponseCurve 291; // TODO add support
// GrayResponseUnit 290; // TODO add support
// HostComputer 316; // TODO add support
// ImageDescription 270; // TODO add support
// ImageLength 257;
// ImageWidth 256;
// Make 271; // TODO add support
// MaxSampleValue 281; // TODO add support
// MinSampleValue 280; // TODO add support
// Model 272; // TODO add support
// NewSubfileType 254; // TODO add support
// Orientation 274; // TODO add support
// PhotometricInterpretation 262;
// PlanarConfiguration 284;
// ResolutionUnit 296; // TODO add support
// RowsPerStrip 278;
// SamplesPerPixel 277;
// Software 305;
// StripByteCounts 279;
// StripOffsets 273;
// SubfileType 255; // TODO add support
// Threshholding 263; // TODO add support
// XResolution 282;
// YResolution 283;
// // Advanced tags
// Predictor 317;
// // TIFF Extensions
// // Section 11 CCITT Bilevel Encodings
// // Compression
// T4Options 292;
// T6Options 293;
// // Section 12 Document Storagte and Retrieval
// DocumentName 269;
// PageName 285;
// PageNumber 297;
// XPosition 286;
// YPosition 287;
// // Section 13: LZW Compression
// // Section 14: Differencing Predictor
// // Section 15: Tiled Images -- Do not use both striporiented and tile-oriented fields in the same TIFF file
// TileWidth 322;
// TileLength 323;
// TileOffsets 324;
// TileByteCounts 325;
// // Section 16: CMYK Images
// InkSet 332;
// NumberOfInks 334;
// InkNames 333;
// DotRange 336;
// TargetPrinter 337;
// // Section 17: HalftoneHints
// HalftoneHints 321;
// // Section 18: Associated Alpha Handling
// ExtraSamples 338;
// // Section 19: Data Sample Format
// SampleFormat 339;
// SMinSampleValue 340;
// SMaxSampleValue 341;
// // Section 20: RGB Image Colorimetry
// WhitePoint 318;
// PrimaryChromaticities 319;
// TransferFunction 301;
// TransferRange 342;
// ReferenceBlackWhite 532;
// // Section 21: YCbCr Images
// }
enum_from_primitive! {
#[derive(Clone, Copy, Debug)]
pub enum Type {
BYTE = 1,
ASCII = 2,
SHORT = 3,
LONG = 4,
RATIONAL = 5,
SBYTE = 6,
UNDEFINED = 7,
SSHORT = 8,
SLONG = 9,
SRATIONAL = 10,
FLOAT = 11,
DOUBLE = 12,
}
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value {
//Signed(i32),
Unsigned(u32),
List(Vec<Value>)
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value_Type {
Value,
Offset
}
impl Value {
pub fn as_u32(self) -> ::image::ImageResult<u32> {
match self {
Unsigned(val) => Ok(val),
val => Err(::image::ImageError::FormatError(format!(
"Expected unsigned integer, {:?} found.", val
)))
}
}
pub fn as_u32_vec(self) -> ::image::ImageResult<Vec<u32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec.into_iter() {
new_vec.push(try!(v.as_u32()))
}
Ok(new_vec)
},
Unsigned(val) => Ok(vec![val]),
//_ => Err(::image::FormatError("Tag data malformed.".to_string()))
}
}
}
pub struct Entry {
type_: Type,
count: u32,
offset: [u8; 4]
}
impl ::std::fmt::Debug for Entry {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
fmt.write_str(&format!("Entry {{ type_: {:?}, count: {:?}, offset: {:?} }}",
self.type_,
self.count,
&self.offset,
// String::from_utf8_lossy ( &self.offset ),
))
}
}
impl Entry {
pub fn | (type_: Type, count: u32, offset: [u8; 4] ) -> Entry {
Entry {
type_: type_,
count: count,
offset: offset,
}
}
/// Returns a mem_reader for the offset/value field
pub fn r(&self, byte_order: ByteOrder) -> SmartReader<io::Cursor<Vec<u8>>> {
SmartReader::wrap(
io::Cursor::new(self.offset.to_vec()),
byte_order
)
}
// Refactor this to remove the dependency on decoder,
pub fn val<R: Read + Seek>(&self, decoder: &mut super::TIFFDecoder<R>)
-> ::image::ImageResult<Value> {
let bo = decoder.byte_order();
match (self.type_, self.count) {
// TODO check if this could give wrong results
// at a different endianess of file/computer.
(Type::BYTE, 1) => Ok(Unsigned(self.offset[0] as u32)),
(Type::SHORT, 1) => Ok(Unsigned(try!(self.r(bo).read_u16()) as u32)),
(Type::SHORT, 2) => {
let mut r = self.r(bo);
Ok(List(vec![
Unsigned(try!(r.read_u16()) as u32),
Unsigned(try!(r.read_u16()) as u32)
]))
},
(Type::SHORT, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0 .. n {
v.push(Unsigned(try!(decoder.read_short()) as u32))
}
Ok(List(v))
},
(Type::LONG, 1) => Ok(Unsigned(try!(self.r(bo).read_u32()))),
(Type::LONG, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0 .. n {
v.push(Unsigned(try!(decoder.read_long())))
}
Ok(List(v))
}
_ => Err(::image::ImageError::UnsupportedError("Unsupported data type.".to_string()))
}
}
}
/// Type representing an Image File Directory
pub type Directory = HashMap<Tag, Entry>;
| new | identifier_name |
ifd.rs | //! Function for reading TIFF tags
use std::io::{self, Read, Seek};
use std::collections::{HashMap};
use super::stream::{ByteOrder, SmartReader, EndianReader};
use self::Value::{Unsigned, List};
macro_rules! tags {
{$(
$tag:ident
$val:expr;
)*} => {
/// TIFF tag
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum Tag {
$($tag,)*
Unknown(u16)
}
impl Tag {
pub fn from_u16(n: u16) -> Tag {
$(if n == $val { Tag::$tag } else)* {
Tag::Unknown(n)
}
}
}
}
}
// taken from https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf Appendix A
// TagName Value; // in HEx TagTYPE Number of Values
tags! {
NewSubfileType 254; // FE LONG 1
SubfileType 255; // FF SHORT 1
ImageWidth 256; // 100 SHORT or LONG 1
ImageLength 257; // 101 SHORT or LONG 1
BitsPerSample 258; // 102 SHORT SamplesPerPixel
Compression 259; // 103 SHORT 1
PhotometricInterpretation 262; // 106 SHORT
Threshholding 263; // 107 SHORT 1
CellWidth 264; // 108 SHORT 1
CellLength 265; // 109 SHORT 1
FillOrder 266; // 10A SHORT 1
DocumentName 269; // 10D ASCII
ImageDescription 270; // 10E ASCII
Make 271; // 10F ASCII
Model 272; // 110 ASCII
StripOffsets 273; // 111 SHORT or LONG StripsPerImage
Orientation 274; // 112 SHORT 1
SamplesPerPixel 277; // 115 SHORT 1
RowsPerStrip 278; // 116 SHORT or LONG 1
StripByteCounts 279; // 117 LONG or SHORT StripsPerImage
MinSampleValue 280; // 118 SHORT SamplesPerPixel
MaxSampleValue 281; // 119 SHORT SamplesPerPixel
XResolution 282; // 11A RATIONAL 1
YResolution 283; // 11B RATIONAL 1
PlanarConfiguration 284; // 11C SHORT 1
PageName 285; // 11D ASCII
XPosition 286; // 11E RATIONAL
YPosition 287; // 11F RATIONAL
FreeOffsets 288; // 120 LONG
FreeByteCounts 289; // 121 LONG
GrayResponseUnit 290; // 122 SHORT
GrayResponseCurve 291; // 123 SHORT 2**BitsPerSample
T4Options 292; // 124 LONG 1
T6Options 293; // 125 LONG 1
ResolutionUnit 296; // 128 SHORT 1
PageNumber 297; // 129 SHORT 2
TransferFunction 301; // 12D SHORT
Software 305; // 131 ASCII
DateTime 306; // 132 ASCII 20
Artist 315; // 13B ASCII
HostComputer 316; // 13C ASCII
Predictor 317; // 13D SHORT 1
WhitePoint 318; // 13E RATIONAL 2
PrimaryChromaticities 319; // 13F RATIONAL 6
ColorMap 320; // 140 SHORT 3 * (2**BitsPerSample)
HalftoneHints 321; // 141 SHORT 2
TileWidth 322; // 142 SHORT or LONG 1
TileLength 323; // 143 SHORT or LONG 1
TileOffsets 324; // 144 LONG TilesPerImage
TileByteCounts 325; // 145 SHORT or LONG TilesPerImage
InkSet 332; // 14C SHORT 1
InkNames 333; // 14D ASCII t
NumberOfInks 334; // 14E SHORT 1
DotRange 336; // 150 BYTE or SHORT 2, or 2*
TargetPrinter 337; // 151 ASCII any
ExtraSamples 338; // 152 BYTE number of extra compo
SampleFormat 339; // 153 SHORT SamplesPerPixel
SMinSampleValue 340; // 154 Any SamplesPerPixel
SMaxSampleValue 341; // 155 Any SamplesPerPixel
TransferRange 342; // 156 SHORT 6
JPEGProc 512; // 200 SHORT 1
JPEGInterchangeFormat 513; // 201 LONG 1
JPEGInterchangeFormatLngth 514; // 202 LONG 1
JPEGRestartInterval 515; // 203 SHORT 1
JPEGLosslessPredictors 517; // 205 SHORT SamplesPerPixel
JPEGPointTransforms 518; // 206 SHORT SamplesPerPixel
JPEGQTables 519; // 207 LONG SamplesPerPixel
JPEGDCTables 520; // 208 LONG SamplesPerPixel
JPEGACTables 521; // 209 LONG SamplesPerPixel
YCbCrCoefficients 529; // 211 RATIONAL 3
YCbCrSubSampling 530; // 212 SHORT 2
YCbCrPositioning 531; // 213 SHORT 1
ReferenceBlackWhite 532; // 214 LONG 2*SamplesPerPixel
Copyright 33432; // 8298 ASCII Any
}
| // tags!{
// // Baseline tags:
// Artist 315; // TODO add support
// // grayscale images PhotometricInterpretation 1 or 3
// BitsPerSample 258;
// CellLength 265; // TODO add support
// CellWidth 264; // TODO add support
// // palette-color images (PhotometricInterpretation 3)
// ColorMap 320; // TODO add support
// Compression 259; // TODO add support for 2 and 32773
// Copyright 33432; // TODO add support
// DateTime 306; // TODO add support
// ExtraSamples 338; // TODO add support
// FillOrder 266; // TODO add support
// FreeByteCounts 289; // TODO add support
// FreeOffsets 288; // TODO add support
// GrayResponseCurve 291; // TODO add support
// GrayResponseUnit 290; // TODO add support
// HostComputer 316; // TODO add support
// ImageDescription 270; // TODO add support
// ImageLength 257;
// ImageWidth 256;
// Make 271; // TODO add support
// MaxSampleValue 281; // TODO add support
// MinSampleValue 280; // TODO add support
// Model 272; // TODO add support
// NewSubfileType 254; // TODO add support
// Orientation 274; // TODO add support
// PhotometricInterpretation 262;
// PlanarConfiguration 284;
// ResolutionUnit 296; // TODO add support
// RowsPerStrip 278;
// SamplesPerPixel 277;
// Software 305;
// StripByteCounts 279;
// StripOffsets 273;
// SubfileType 255; // TODO add support
// Threshholding 263; // TODO add support
// XResolution 282;
// YResolution 283;
// // Advanced tags
// Predictor 317;
// // TIFF Extensions
// // Section 11 CCITT Bilevel Encodings
// // Compression
// T4Options 292;
// T6Options 293;
// // Section 12 Document Storagte and Retrieval
// DocumentName 269;
// PageName 285;
// PageNumber 297;
// XPosition 286;
// YPosition 287;
// // Section 13: LZW Compression
// // Section 14: Differencing Predictor
// // Section 15: Tiled Images -- Do not use both striporiented and tile-oriented fields in the same TIFF file
// TileWidth 322;
// TileLength 323;
// TileOffsets 324;
// TileByteCounts 325;
// // Section 16: CMYK Images
// InkSet 332;
// NumberOfInks 334;
// InkNames 333;
// DotRange 336;
// TargetPrinter 337;
// // Section 17: HalftoneHints
// HalftoneHints 321;
// // Section 18: Associated Alpha Handling
// ExtraSamples 338;
// // Section 19: Data Sample Format
// SampleFormat 339;
// SMinSampleValue 340;
// SMaxSampleValue 341;
// // Section 20: RGB Image Colorimetry
// WhitePoint 318;
// PrimaryChromaticities 319;
// TransferFunction 301;
// TransferRange 342;
// ReferenceBlackWhite 532;
// // Section 21: YCbCr Images
// }
enum_from_primitive! {
#[derive(Clone, Copy, Debug)]
pub enum Type {
BYTE = 1,
ASCII = 2,
SHORT = 3,
LONG = 4,
RATIONAL = 5,
SBYTE = 6,
UNDEFINED = 7,
SSHORT = 8,
SLONG = 9,
SRATIONAL = 10,
FLOAT = 11,
DOUBLE = 12,
}
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value {
//Signed(i32),
Unsigned(u32),
List(Vec<Value>)
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value_Type {
Value,
Offset
}
impl Value {
pub fn as_u32(self) -> ::image::ImageResult<u32> {
match self {
Unsigned(val) => Ok(val),
val => Err(::image::ImageError::FormatError(format!(
"Expected unsigned integer, {:?} found.", val
)))
}
}
pub fn as_u32_vec(self) -> ::image::ImageResult<Vec<u32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec.into_iter() {
new_vec.push(try!(v.as_u32()))
}
Ok(new_vec)
},
Unsigned(val) => Ok(vec![val]),
//_ => Err(::image::FormatError("Tag data malformed.".to_string()))
}
}
}
pub struct Entry {
type_: Type,
count: u32,
offset: [u8; 4]
}
impl ::std::fmt::Debug for Entry {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
fmt.write_str(&format!("Entry {{ type_: {:?}, count: {:?}, offset: {:?} }}",
self.type_,
self.count,
&self.offset,
// String::from_utf8_lossy ( &self.offset ),
))
}
}
impl Entry {
pub fn new(type_: Type, count: u32, offset: [u8; 4] ) -> Entry {
Entry {
type_: type_,
count: count,
offset: offset,
}
}
/// Returns a mem_reader for the offset/value field
pub fn r(&self, byte_order: ByteOrder) -> SmartReader<io::Cursor<Vec<u8>>> {
SmartReader::wrap(
io::Cursor::new(self.offset.to_vec()),
byte_order
)
}
// Refactor this to remove the dependency on decoder,
pub fn val<R: Read + Seek>(&self, decoder: &mut super::TIFFDecoder<R>)
-> ::image::ImageResult<Value> {
let bo = decoder.byte_order();
match (self.type_, self.count) {
// TODO check if this could give wrong results
// at a different endianess of file/computer.
(Type::BYTE, 1) => Ok(Unsigned(self.offset[0] as u32)),
(Type::SHORT, 1) => Ok(Unsigned(try!(self.r(bo).read_u16()) as u32)),
(Type::SHORT, 2) => {
let mut r = self.r(bo);
Ok(List(vec![
Unsigned(try!(r.read_u16()) as u32),
Unsigned(try!(r.read_u16()) as u32)
]))
},
(Type::SHORT, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0 .. n {
v.push(Unsigned(try!(decoder.read_short()) as u32))
}
Ok(List(v))
},
(Type::LONG, 1) => Ok(Unsigned(try!(self.r(bo).read_u32()))),
(Type::LONG, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0 .. n {
v.push(Unsigned(try!(decoder.read_long())))
}
Ok(List(v))
}
_ => Err(::image::ImageError::UnsupportedError("Unsupported data type.".to_string()))
}
}
}
/// Type representing an Image File Directory
pub type Directory = HashMap<Tag, Entry>; |
// Note: These tags appear in the order they are mentioned in the TIFF reference
// https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf | random_line_split |
client.py | import os
from googleapiclient.discovery import build
import httplib2
from oauth2client import gce
from oauth2client.appengine import AppAssertionCredentials
from oauth2client.file import Storage
__author__ = 'ekampf'
import json
import logging
import apiclient.errors
from apiclient import http as apiclient_request
from apiclient import model as apiclient_model
from .errors import BigQueryError, BigQueryCommunicationError, BigQueryDuplicateError, \
BigQueryStreamingMaximumRowSizeExceededError, BigQueryAuthorizationError
# pylint: disable=E1002
class BigQueryModel(apiclient_model.JsonModel):
"""Adds optional global parameters to all requests."""
def __init__(self, trace=None, **kwargs):
super(BigQueryModel, self).__init__(**kwargs)
self.trace = trace
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing request."""
if 'trace' not in query_params and self.trace:
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
@staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def | (self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param application_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True performs 2 inserts passes. On first pass, if there's an error google return "invalid" on error rows but doesnt insert anything (rest of the rows marked as "stopped").
So we filter out "invalid" rows and do a 2nd pass.
Note that this does not ignore if there's a BigQueryStreamingMaximumRowSizeExceeded error.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were error return object is a dict containing the response object for the 2 insert passes performed: dict(response_pass1=..., response_pass2=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'sourceUris': ['gs:/%s' % s for s in gcs_links],
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_id
},
}
}
}
logging.info('about to insert job:%s', job_data)
try:
job = self.api_client.jobs().insert(projectId=project_id, body=job_data).execute()
status = job['status']
if 'errorResult' in status:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], job['jobReference'])
return job
except BigQueryError as ex:
logging.exception(ex)
raise
def monitor_insert_job(self, project_id, job_id):
try:
logging.info('about to monitor job: %s', job_id)
job = self.api_client.jobs().get(project_id, job_id)
logging.info('Got job response: %s', job)
state = job['status']['state']
if state == 'DONE':
logging.info("Job %s is done loading!", job_id)
if 'errorResult' in job['status']:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], {'projectId': project_id, 'jobId': job_id})
except BigQueryError as ex:
logging.exception(ex)
raise
def get_query_results(self, project_id, job_id, timeoutMs=None, pageToken=None, maxResults=None, startIndex=None):
"""Retrieves the results of a query job.
:param project_id: Project ID of the query job.
:param job_id: Job ID of the query job.
:param timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.
:param pageToken: string, Page token, returned by a previous call, to request the next page of results
:param maxResults: integer, Maximum number of results to read
:param startIndex: string, Zero-based index of the starting row
:return:
"""
try:
return self.api_client.jobs().getQueryResults(project_id, job_id, timeoutMs, pageToken, maxResults, startIndex)
except BigQueryError as ex:
logging.exception(ex)
raise
| __init__ | identifier_name |
client.py | import os
from googleapiclient.discovery import build
import httplib2
from oauth2client import gce
from oauth2client.appengine import AppAssertionCredentials
from oauth2client.file import Storage
__author__ = 'ekampf'
import json
import logging
import apiclient.errors
from apiclient import http as apiclient_request
from apiclient import model as apiclient_model
from .errors import BigQueryError, BigQueryCommunicationError, BigQueryDuplicateError, \
BigQueryStreamingMaximumRowSizeExceededError, BigQueryAuthorizationError
# pylint: disable=E1002
class BigQueryModel(apiclient_model.JsonModel):
"""Adds optional global parameters to all requests."""
def __init__(self, trace=None, **kwargs):
super(BigQueryModel, self).__init__(**kwargs)
self.trace = trace
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing request."""
if 'trace' not in query_params and self.trace:
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
@staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
|
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param application_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True performs 2 inserts passes. On first pass, if there's an error google return "invalid" on error rows but doesnt insert anything (rest of the rows marked as "stopped").
So we filter out "invalid" rows and do a 2nd pass.
Note that this does not ignore if there's a BigQueryStreamingMaximumRowSizeExceeded error.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were error return object is a dict containing the response object for the 2 insert passes performed: dict(response_pass1=..., response_pass2=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'sourceUris': ['gs:/%s' % s for s in gcs_links],
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_id
},
}
}
}
logging.info('about to insert job:%s', job_data)
try:
job = self.api_client.jobs().insert(projectId=project_id, body=job_data).execute()
status = job['status']
if 'errorResult' in status:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], job['jobReference'])
return job
except BigQueryError as ex:
logging.exception(ex)
raise
def monitor_insert_job(self, project_id, job_id):
try:
logging.info('about to monitor job: %s', job_id)
job = self.api_client.jobs().get(project_id, job_id)
logging.info('Got job response: %s', job)
state = job['status']['state']
if state == 'DONE':
logging.info("Job %s is done loading!", job_id)
if 'errorResult' in job['status']:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], {'projectId': project_id, 'jobId': job_id})
except BigQueryError as ex:
logging.exception(ex)
raise
def get_query_results(self, project_id, job_id, timeoutMs=None, pageToken=None, maxResults=None, startIndex=None):
"""Retrieves the results of a query job.
:param project_id: Project ID of the query job.
:param job_id: Job ID of the query job.
:param timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.
:param pageToken: string, Page token, returned by a previous call, to request the next page of results
:param maxResults: integer, Maximum number of results to read
:param startIndex: string, Zero-based index of the starting row
:return:
"""
try:
return self.api_client.jobs().getQueryResults(project_id, job_id, timeoutMs, pageToken, maxResults, startIndex)
except BigQueryError as ex:
logging.exception(ex)
raise
| 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/') | identifier_body |
client.py | import os
from googleapiclient.discovery import build
import httplib2
from oauth2client import gce
from oauth2client.appengine import AppAssertionCredentials
from oauth2client.file import Storage
__author__ = 'ekampf'
import json
import logging
import apiclient.errors
from apiclient import http as apiclient_request
from apiclient import model as apiclient_model
from .errors import BigQueryError, BigQueryCommunicationError, BigQueryDuplicateError, \
BigQueryStreamingMaximumRowSizeExceededError, BigQueryAuthorizationError
# pylint: disable=E1002
class BigQueryModel(apiclient_model.JsonModel):
"""Adds optional global parameters to all requests."""
def __init__(self, trace=None, **kwargs):
super(BigQueryModel, self).__init__(**kwargs)
self.trace = trace
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing request."""
if 'trace' not in query_params and self.trace:
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
| @staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param application_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True performs 2 inserts passes. On first pass, if there's an error google return "invalid" on error rows but doesnt insert anything (rest of the rows marked as "stopped").
So we filter out "invalid" rows and do a 2nd pass.
Note that this does not ignore if there's a BigQueryStreamingMaximumRowSizeExceeded error.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were error return object is a dict containing the response object for the 2 insert passes performed: dict(response_pass1=..., response_pass2=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'sourceUris': ['gs:/%s' % s for s in gcs_links],
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_id
},
}
}
}
logging.info('about to insert job:%s', job_data)
try:
job = self.api_client.jobs().insert(projectId=project_id, body=job_data).execute()
status = job['status']
if 'errorResult' in status:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], job['jobReference'])
return job
except BigQueryError as ex:
logging.exception(ex)
raise
def monitor_insert_job(self, project_id, job_id):
try:
logging.info('about to monitor job: %s', job_id)
job = self.api_client.jobs().get(project_id, job_id)
logging.info('Got job response: %s', job)
state = job['status']['state']
if state == 'DONE':
logging.info("Job %s is done loading!", job_id)
if 'errorResult' in job['status']:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], {'projectId': project_id, 'jobId': job_id})
except BigQueryError as ex:
logging.exception(ex)
raise
def get_query_results(self, project_id, job_id, timeoutMs=None, pageToken=None, maxResults=None, startIndex=None):
"""Retrieves the results of a query job.
:param project_id: Project ID of the query job.
:param job_id: Job ID of the query job.
:param timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.
:param pageToken: string, Page token, returned by a previous call, to request the next page of results
:param maxResults: integer, Maximum number of results to read
:param startIndex: string, Zero-based index of the starting row
:return:
"""
try:
return self.api_client.jobs().getQueryResults(project_id, job_id, timeoutMs, pageToken, maxResults, startIndex)
except BigQueryError as ex:
logging.exception(ex)
raise | random_line_split | |
client.py | import os
from googleapiclient.discovery import build
import httplib2
from oauth2client import gce
from oauth2client.appengine import AppAssertionCredentials
from oauth2client.file import Storage
__author__ = 'ekampf'
import json
import logging
import apiclient.errors
from apiclient import http as apiclient_request
from apiclient import model as apiclient_model
from .errors import BigQueryError, BigQueryCommunicationError, BigQueryDuplicateError, \
BigQueryStreamingMaximumRowSizeExceededError, BigQueryAuthorizationError
# pylint: disable=E1002
class BigQueryModel(apiclient_model.JsonModel):
"""Adds optional global parameters to all requests."""
def __init__(self, trace=None, **kwargs):
super(BigQueryModel, self).__init__(**kwargs)
self.trace = trace
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing request."""
if 'trace' not in query_params and self.trace:
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
@staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
|
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param application_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True performs 2 inserts passes. On first pass, if there's an error google return "invalid" on error rows but doesnt insert anything (rest of the rows marked as "stopped").
So we filter out "invalid" rows and do a 2nd pass.
Note that this does not ignore if there's a BigQueryStreamingMaximumRowSizeExceeded error.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were error return object is a dict containing the response object for the 2 insert passes performed: dict(response_pass1=..., response_pass2=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'sourceUris': ['gs:/%s' % s for s in gcs_links],
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_id
},
}
}
}
logging.info('about to insert job:%s', job_data)
try:
job = self.api_client.jobs().insert(projectId=project_id, body=job_data).execute()
status = job['status']
if 'errorResult' in status:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], job['jobReference'])
return job
except BigQueryError as ex:
logging.exception(ex)
raise
def monitor_insert_job(self, project_id, job_id):
try:
logging.info('about to monitor job: %s', job_id)
job = self.api_client.jobs().get(project_id, job_id)
logging.info('Got job response: %s', job)
state = job['status']['state']
if state == 'DONE':
logging.info("Job %s is done loading!", job_id)
if 'errorResult' in job['status']:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], {'projectId': project_id, 'jobId': job_id})
except BigQueryError as ex:
logging.exception(ex)
raise
def get_query_results(self, project_id, job_id, timeoutMs=None, pageToken=None, maxResults=None, startIndex=None):
"""Retrieves the results of a query job.
:param project_id: Project ID of the query job.
:param job_id: Job ID of the query job.
:param timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.
:param pageToken: string, Page token, returned by a previous call, to request the next page of results
:param maxResults: integer, Maximum number of results to read
:param startIndex: string, Zero-based index of the starting row
:return:
"""
try:
return self.api_client.jobs().getQueryResults(project_id, job_id, timeoutMs, pageToken, maxResults, startIndex)
except BigQueryError as ex:
logging.exception(ex)
raise
| from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache)) | conditional_block |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integet formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Informations about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access informations
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access informations
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that The GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that The GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Is there any mapped buffer reads ?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Is there any mapped buffer writes ?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that The GPU will read from,
/// with exclusive acces to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that The GPU will write to,
/// with exclusive acces to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) { | }
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
} | for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
} | random_line_split |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integet formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Informations about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access informations
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access informations
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that The GPU will read from
pub fn | (&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that The GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Is there any mapped buffer reads ?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Is there any mapped buffer writes ?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that The GPU will read from,
/// with exclusive acces to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that The GPU will write to,
/// with exclusive acces to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) {
for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
}
}
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
| mapped_reads | identifier_name |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integet formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Informations about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access informations
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access informations
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() |
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that The GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that The GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Is there any mapped buffer reads ?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Is there any mapped buffer writes ?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that The GPU will read from,
/// with exclusive acces to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that The GPU will write to,
/// with exclusive acces to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) {
for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
}
}
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
| {
self.mapped_reads.insert(buffer.clone());
} | conditional_block |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integet formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor |
}
/// Informations about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access informations
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access informations
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that The GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that The GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Is there any mapped buffer reads ?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Is there any mapped buffer writes ?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that The GPU will read from,
/// with exclusive acces to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that The GPU will write to,
/// with exclusive acces to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) {
for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
}
}
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
| {
ClearColor::Uint([v, 0, 0, 0])
} | identifier_body |
xcapture.go | package main
import (
"flag"
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"sync"
"time"
"unsafe"
"honnef.co/go/xcapture/internal/shm"
"github.com/BurntSushi/xgb"
"github.com/BurntSushi/xgb/composite"
"github.com/BurntSushi/xgb/damage"
xshm "github.com/BurntSushi/xgb/shm"
"github.com/BurntSushi/xgb/xfixes"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/codahale/hdrhistogram"
)
const bytesPerPixel = 4
const numPages = 4
func min(xs ...int) int {
if len(xs) == 0 {
return 0
}
m := xs[0]
for _, x := range xs[1:] {
if x < m {
m = x
}
}
return m
}
// TODO(dh): this definition of a window is specific to Linux. On
// Windows, for example, we wouldn't have an integer specifier for the
// window.
type Window struct {
ID int
mu sync.RWMutex
width int
height int
borderWidth int
}
func (w *Window) SetDimensions(width, height, border int) {
w.mu.Lock()
defer w.mu.Unlock()
w.width = width
w.height = height
w.borderWidth = border
}
func (w *Window) Dimensions() (width, height, border int) {
w.mu.RLock()
defer w.mu.RUnlock()
return w.width, w.height, w.borderWidth
}
type Canvas struct {
Width int
Height int
}
type Frame struct {
Data []byte
Time time.Time
}
type Buffer struct {
Pages int
PageSize int
Data []byte
ShmID int
}
func (b Buffer) PageOffset(idx int) int {
return b.PageSize * idx
}
func (b Buffer) Page(idx int) []byte {
offset := b.PageOffset(idx)
size := b.PageSize
return b.Data[offset : offset+size : offset+size]
}
type BitmapInfoHeader struct {
Size uint32
Width int32
Height int32
Planes uint16
BitCount uint16
Compression [4]byte
SizeImage uint32
XPelsPerMeter int32
YPelsPerMeter int32
ClrUsed uint32
ClrImportant uint32
}
func NewBuffer(pageSize, pages int) (Buffer, error) {
size := pageSize * pages
seg, err := shm.Create(size)
if err != nil {
return Buffer{}, err
}
data, err := seg.Attach()
if err != nil {
return Buffer{}, err
}
sh := &reflect.SliceHeader{
Data: uintptr(data),
Len: size,
Cap: size,
}
b := (*(*[]byte)(unsafe.Pointer(sh)))
return Buffer{
Pages: pages,
PageSize: pageSize,
Data: b,
ShmID: seg.ID,
}, nil
}
type EventLoop struct {
conn *xgb.Conn
mu sync.RWMutex
listeners []chan xgb.Event
}
func NewEventLoop(conn *xgb.Conn) *EventLoop {
el := &EventLoop{conn: conn}
go el.start()
return el
}
func (el *EventLoop) Register(ch chan xgb.Event) {
el.mu.Lock()
defer el.mu.Unlock()
el.listeners = append(el.listeners, ch)
}
func (el *EventLoop) start() {
for {
ev, err := el.conn.WaitForEvent()
if err != nil {
continue
}
el.mu.RLock()
ls := el.listeners
el.mu.RUnlock()
for _, l := range ls {
l <- ev
}
}
}
type CaptureEvent struct {
Resized bool
}
type ResizeMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
win *Window
}
func NewResizeMonitor(el *EventLoop, win *Window) *ResizeMonitor {
res := &ResizeMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
win: win,
}
el.Register(res.elCh)
go res.start()
return res
}
func (res *ResizeMonitor) start() {
for ev := range res.elCh {
if ev, ok := ev.(xproto.ConfigureNotifyEvent); ok {
w, h, bw := res.win.Dimensions()
if int(ev.Width) != w || int(ev.Height) != h || int(ev.BorderWidth) != bw {
w, h, bw = int(ev.Width), int(ev.Height), int(ev.BorderWidth)
res.win.SetDimensions(w, h, bw)
select {
case res.C <- CaptureEvent{true}:
default:
}
}
}
}
}
type DamageMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
conn *xgb.Conn
fps int
win *Window
}
func NewDamageMonitor(conn *xgb.Conn, el *EventLoop, win *Window, fps int) *DamageMonitor {
dmg := &DamageMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
conn: conn,
fps: fps,
win: win,
}
el.Register(dmg.elCh)
go dmg.startDamage()
go dmg.startCursor()
return dmg
}
func (dmg *DamageMonitor) startDamage() {
xdmg, err := damage.NewDamageId(dmg.conn)
if err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.Create(dmg.conn, xdmg, xproto.Drawable(dmg.win.ID), damage.ReportLevelRawRectangles)
for ev := range dmg.elCh {
if _, ok := ev.(damage.NotifyEvent); ok {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func (dmg *DamageMonitor) startCursor() {
var prevCursor struct{ X, Y int }
prevInWindow := true
d := time.Second / time.Duration(dmg.fps)
t := time.NewTicker(d)
for range t.C {
cursor, err := xproto.QueryPointer(dmg.conn, xproto.Window(dmg.win.ID)).Reply()
if err != nil {
log.Println("Couldn't query cursor position:", err)
continue
}
c := struct{ X, Y int }{int(cursor.WinX), int(cursor.WinY)}
if c == prevCursor {
continue
}
prevCursor = c
damaged := false
w, h, _ := dmg.win.Dimensions()
if c.X < 0 || c.Y < 0 || c.X > w || c.Y > h {
if prevInWindow {
// cursor moved out of the window, which requires a redraw
damaged = true
}
prevInWindow = false
} else {
damaged = true
}
if damaged {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func parseSize(s string) (width, height int, err error) {
err = fmt.Errorf("%q is not a valid size specification", s)
if len(s) < 3 {
return 0, 0, err
}
parts := strings.Split(s, "x")
if len(parts) != 2 {
return 0, 0, err
}
width, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid width: %s", err)
}
height, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid height: %s", err)
}
return width, height, err
}
func main() {
fps := flag.Uint("fps", 30, "FPS")
winID := flag.Int("win", 0, "Window ID")
size := flag.String("size", "", "Canvas size in the format WxH in pixels. Defaults to the initial size of the captured window")
cfr := flag.Bool("cfr", false, "Use a constant frame rate")
_ = cfr
flag.Parse()
win := &Window{ID: *winID}
xu, err := xgbutil.NewConn()
if err != nil {
log.Fatal("Couldn't connect to X server:", err)
}
if err := composite.Init(xu.Conn()); err != nil {
log.Fatal("COMPOSITE extension is not available:", err)
}
if err := xfixes.Init(xu.Conn()); err != nil {
log.Fatal("XFIXES extension is not available:", err)
}
xfixes.QueryVersion(xu.Conn(), 1, 0)
if err := xshm.Init(xu.Conn()); err != nil {
// TODO(dh) implement a slower version that is not using SHM
log.Fatal("MIT-SHM extension is not available:", err)
}
if err := composite.RedirectWindowChecked(xu.Conn(), xproto.Window(win.ID), composite.RedirectAutomatic).Check(); err != nil {
if err, ok := err.(xproto.AccessError); ok {
log.Fatal("Can't capture window, another program seems to be capturing it already:", err)
}
log.Fatal("Can't capture window:", err)
}
pix, err := xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
segID, err := xshm.NewSegId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for SHM:", err)
}
// Register event before we query the window size for the first
// time. Otherwise we could race and miss a window resize.
err = xproto.ChangeWindowAttributesChecked(xu.Conn(), xproto.Window(win.ID),
xproto.CwEventMask, []uint32{uint32(xproto.EventMaskStructureNotify)}).Check()
if err != nil {
log.Fatal("Couldn't monitor window for size changes:", err)
}
geom, err := xproto.GetGeometry(xu.Conn(), xproto.Drawable(win.ID)).Reply()
if err != nil {
log.Fatal("Could not determine window dimensions:", err)
}
win.SetDimensions(int(geom.Width), int(geom.Height), int(geom.BorderWidth))
var canvas Canvas
if *size != "" {
width, height, err := parseSize(*size)
if err != nil {
log.Fatal(err)
}
canvas = Canvas{width, height}
} else {
canvas = Canvas{
Width: int(geom.Width),
Height: int(geom.Height),
}
}
buf, err := NewBuffer(canvas.Width*canvas.Height*bytesPerPixel, numPages)
if err != nil {
log.Fatal("Could not create shared memory:", err)
}
if err := xshm.AttachChecked(xu.Conn(), segID, uint32(buf.ShmID), false).Check(); err != nil {
log.Fatal("Could not attach shared memory to X server:", err)
}
i := 0
ch := make(chan Frame)
tags := map[string]string{
"DATE_RECORDED": time.Now().UTC().Format("2006-01-02 15:04:05.999"),
"WINDOW_ID": strconv.Itoa(win.ID),
}
vw := NewVideoWriter(canvas, int(*fps), *cfr, tags, os.Stdout)
if err := vw.Start(); err != nil {
log.Fatal("Couldn't write output:", err)
}
chistMu := &sync.Mutex{}
chist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
whist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
rhist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
var lastSlow time.Time
var slows uint64
go func() {
d := time.Second / time.Duration(*fps)
t := time.NewTicker(d)
start := time.Now()
dupped := 0
var prevFrameTime time.Time
first := true
for ts := range t.C {
if rhist.TotalCount()%int64(*fps) == 0 {
chistMu.Lock()
var cbracket hdrhistogram.Bracket
var wbracket hdrhistogram.Bracket
var rbracket hdrhistogram.Bracket
brackets := chist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
cbracket = bracket
}
brackets = whist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
wbracket = bracket
}
brackets = rhist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
rbracket = bracket
}
s := "%d frames, %d dup, started recording %s ago\n" +
"capture latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"write latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"render loop min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"Last slowdown: %s (%d total)\n"
if !first {
s = "\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\r" + s
}
first = false
var dslow interface{}
if lastSlow.IsZero() {
dslow = "never"
} else {
dslow = time.Since(lastSlow).String() + " ago"
}
fmt.Fprintf(os.Stderr, s,
whist.TotalCount(), dupped, time.Since(start),
milliseconds(chist.Min()), milliseconds(chist.Max()), milliseconds(int64(chist.Mean())), milliseconds(int64(chist.StdDev())), cbracket.Quantile, milliseconds(cbracket.ValueAt),
milliseconds(whist.Min()), milliseconds(whist.Max()), milliseconds(int64(whist.Mean())), milliseconds(int64(whist.StdDev())), wbracket.Quantile, milliseconds(wbracket.ValueAt),
milliseconds(rhist.Min()), milliseconds(rhist.Max()), milliseconds(int64(rhist.Mean())), milliseconds(int64(rhist.StdDev())), rbracket.Quantile, milliseconds(rbracket.ValueAt),
dslow, slows)
chistMu.Unlock()
}
var err error
t := time.Now()
select {
case frame := <-ch:
err = vw.SendFrame(frame)
prevFrameTime = frame.Time
default:
dupped++
err = vw.SendFrame(Frame{Time: prevFrameTime.Add(d)})
prevFrameTime = prevFrameTime.Add(d)
}
whist.RecordCorrectedValue(int64(time.Since(t)), int64(d))
if err != nil {
log.Fatal("Couldn't write frame:", err)
}
dt := time.Since(ts)
if dt > d {
lastSlow = time.Now()
slows++
}
rhist.RecordCorrectedValue(int64(dt), int64(d))
}
}()
el := NewEventLoop(xu.Conn())
res := NewResizeMonitor(el, win)
var other chan CaptureEvent
captureEvents := make(chan CaptureEvent, 1)
if *cfr {
other = make(chan CaptureEvent)
go func() {
for {
other <- CaptureEvent{}
}
}()
} else {
if err := damage.Init(xu.Conn()); err != nil {
// XXX fail back gracefully
log.Fatal(err)
}
damage.QueryVersion(xu.Conn(), 1, 1)
dmg := NewDamageMonitor(xu.Conn(), el, win, int(*fps))
other = dmg.C
}
go func() {
for {
var ev CaptureEvent
select {
case ev = <-res.C:
captureEvents <- ev
case ev = <-other:
captureEvents <- ev
}
}
}()
for ev := range captureEvents {
t := time.Now()
if ev.Resized {
// DRY
xproto.FreePixmap(xu.Conn(), pix)
var err error
pix, err = xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
}
w, h, bw := win.Dimensions()
offset := buf.PageOffset(i)
w = min(w, canvas.Width)
h = min(h, canvas.Height)
ts := time.Now()
_, err := xshm.GetImage(xu.Conn(), xproto.Drawable(pix), int16(bw), int16(bw), uint16(w), uint16(h), 0xFFFFFFFF, xproto.ImageFormatZPixmap, segID, uint32(offset)).Reply()
if err != nil {
continue
}
page := buf.Page(i)
if w < canvas.Width || h < canvas.Height {
i = (i + 1) % numPages
dest := buf.Page(i)
for i := range dest {
dest[i] = 0
}
for i := 0; i < h; i++ {
copy(dest[i*canvas.Width*bytesPerPixel:], page[i*w*bytesPerPixel:(i+1)*w*bytesPerPixel])
}
page = dest
}
drawCursor(xu, win, buf, page, canvas)
chistMu.Lock()
chist.RecordValue(int64(time.Since(t)))
chistMu.Unlock()
ch <- Frame{Data: page, Time: ts}
i = (i + 1) % numPages
}
}
func drawCursor(xu *xgbutil.XUtil, win *Window, buf Buffer, page []byte, canvas Canvas) {
// TODO(dh): We don't need to fetch the cursor image every time.
// We could listen to cursor notify events, fetch the cursor if we
// haven't seen it yet, then cache the cursor.
cursor, err := xfixes.GetCursorImage(xu.Conn()).Reply()
if err != nil {
return
}
pos, err := xproto.TranslateCoordinates(xu.Conn(), xu.RootWin(), xproto.Window(win.ID), cursor.X, cursor.Y).Reply()
if err != nil {
return
}
w, h, _ := win.Dimensions()
w = min(w, canvas.Width)
h = min(h, canvas.Height)
if pos.DstY < 0 || pos.DstX < 0 || int(pos.DstY) > h || int(pos.DstX) > w {
// cursor outside of our window
return
}
for i, p := range cursor.CursorImage {
row := i/int(cursor.Width) + int(pos.DstY) - int(cursor.Yhot)
col := i%int(cursor.Width) + int(pos.DstX) - int(cursor.Xhot)
if row >= canvas.Height || col >= canvas.Width || row < 0 || col < 0 {
// cursor is partially off-screen
break
}
off := row*canvas.Width*bytesPerPixel + col*bytesPerPixel
alpha := (p >> 24) + 1
invAlpha := 256 - (p >> 24)
page[off+3] = 255
page[off+2] = byte((alpha*uint32(byte(p>>16)) + invAlpha*uint32(page[off+2])) >> 8)
page[off+1] = byte((alpha*uint32(byte(p>>8)) + invAlpha*uint32(page[off+1])) >> 8)
page[off+0] = byte((alpha*uint32(byte(p>>0)) + invAlpha*uint32(page[off+0])) >> 8)
}
}
func roundDuration(d, m time.Duration) time.Duration {
| unc milliseconds(di int64) float64 {
d := time.Duration(di)
sec := d / time.Millisecond
nsec := d % time.Millisecond
return float64(sec) + float64(nsec)*1e-6
}
| if m <= 0 {
return d
}
r := d % m
if r < 0 {
r = -r
if r+r < m {
return d + r
}
if d1 := d - m + r; d1 < d {
return d1
}
return d // overflow
}
if r+r < m {
return d - r
}
if d1 := d + m - r; d1 > d {
return d1
}
return d // overflow
}
f | identifier_body |
xcapture.go | package main
import (
"flag"
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"sync"
"time"
"unsafe"
"honnef.co/go/xcapture/internal/shm"
"github.com/BurntSushi/xgb"
"github.com/BurntSushi/xgb/composite"
"github.com/BurntSushi/xgb/damage"
xshm "github.com/BurntSushi/xgb/shm"
"github.com/BurntSushi/xgb/xfixes"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/codahale/hdrhistogram"
)
const bytesPerPixel = 4
const numPages = 4
func min(xs ...int) int {
if len(xs) == 0 {
return 0
}
m := xs[0]
for _, x := range xs[1:] {
if x < m {
m = x
}
}
return m
}
// TODO(dh): this definition of a window is specific to Linux. On
// Windows, for example, we wouldn't have an integer specifier for the
// window.
type Window struct {
ID int
mu sync.RWMutex
width int
height int
borderWidth int
}
func (w *Window) SetDimensions(width, height, border int) {
w.mu.Lock()
defer w.mu.Unlock()
w.width = width
w.height = height
w.borderWidth = border
}
func (w *Window) Dimensions() (width, height, border int) {
w.mu.RLock()
defer w.mu.RUnlock()
return w.width, w.height, w.borderWidth
}
type Canvas struct {
Width int
Height int
}
type Frame struct {
Data []byte
Time time.Time
}
type Buffer struct {
Pages int
PageSize int
Data []byte
ShmID int
}
func (b Buffer) PageOffset(idx int) int {
return b.PageSize * idx
}
func (b Buffer) Page(idx int) []byte {
offset := b.PageOffset(idx)
size := b.PageSize
return b.Data[offset : offset+size : offset+size]
}
type BitmapInfoHeader struct {
Size uint32
Width int32
Height int32
Planes uint16
BitCount uint16
Compression [4]byte
SizeImage uint32
XPelsPerMeter int32
YPelsPerMeter int32
ClrUsed uint32
ClrImportant uint32
}
func NewBuffer(pageSize, pages int) (Buffer, error) {
size := pageSize * pages
seg, err := shm.Create(size)
if err != nil {
return Buffer{}, err
}
data, err := seg.Attach()
if err != nil {
return Buffer{}, err
}
sh := &reflect.SliceHeader{
Data: uintptr(data),
Len: size,
Cap: size,
}
b := (*(*[]byte)(unsafe.Pointer(sh)))
return Buffer{
Pages: pages,
PageSize: pageSize,
Data: b,
ShmID: seg.ID,
}, nil
}
type EventLoop struct {
conn *xgb.Conn
mu sync.RWMutex
listeners []chan xgb.Event
}
func NewEventLoop(conn *xgb.Conn) *EventLoop {
el := &EventLoop{conn: conn}
go el.start()
return el
}
func (el *EventLoop) Register(ch chan xgb.Event) {
el.mu.Lock()
defer el.mu.Unlock()
el.listeners = append(el.listeners, ch)
}
func (el *EventLoop) start() {
for {
ev, err := el.conn.WaitForEvent()
if err != nil {
continue
}
el.mu.RLock()
ls := el.listeners
el.mu.RUnlock()
for _, l := range ls {
l <- ev
}
}
}
type CaptureEvent struct {
Resized bool
}
type ResizeMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
win *Window
}
func NewResizeMonitor(el *EventLoop, win *Window) *ResizeMonitor {
res := &ResizeMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
win: win,
}
el.Register(res.elCh)
go res.start()
return res
}
func (res *ResizeMonitor) start() {
for ev := range res.elCh {
if ev, ok := ev.(xproto.ConfigureNotifyEvent); ok {
w, h, bw := res.win.Dimensions()
if int(ev.Width) != w || int(ev.Height) != h || int(ev.BorderWidth) != bw {
w, h, bw = int(ev.Width), int(ev.Height), int(ev.BorderWidth)
res.win.SetDimensions(w, h, bw)
select {
case res.C <- CaptureEvent{true}:
default:
}
}
}
}
}
type DamageMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
conn *xgb.Conn
fps int
win *Window
}
func NewDamageMonitor(conn *xgb.Conn, el *EventLoop, win *Window, fps int) *DamageMonitor {
dmg := &DamageMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
conn: conn,
fps: fps,
win: win,
}
el.Register(dmg.elCh)
go dmg.startDamage()
go dmg.startCursor()
return dmg
}
func (dmg *DamageMonitor) startDamage() {
xdmg, err := damage.NewDamageId(dmg.conn)
if err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.Create(dmg.conn, xdmg, xproto.Drawable(dmg.win.ID), damage.ReportLevelRawRectangles)
for ev := range dmg.elCh {
if _, ok := ev.(damage.NotifyEvent); ok {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func (dmg *DamageMonitor) startCursor() {
var prevCursor struct{ X, Y int }
prevInWindow := true
d := time.Second / time.Duration(dmg.fps)
t := time.NewTicker(d)
for range t.C {
cursor, err := xproto.QueryPointer(dmg.conn, xproto.Window(dmg.win.ID)).Reply()
if err != nil {
log.Println("Couldn't query cursor position:", err)
continue
}
c := struct{ X, Y int }{int(cursor.WinX), int(cursor.WinY)}
if c == prevCursor {
continue
}
prevCursor = c
damaged := false
w, h, _ := dmg.win.Dimensions()
if c.X < 0 || c.Y < 0 || c.X > w || c.Y > h {
if prevInWindow {
// cursor moved out of the window, which requires a redraw
damaged = true
}
prevInWindow = false
} else {
damaged = true
}
if damaged {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func parseSize(s string) (width, height int, err error) {
err = fmt.Errorf("%q is not a valid size specification", s)
if len(s) < 3 {
return 0, 0, err
}
parts := strings.Split(s, "x")
if len(parts) != 2 {
return 0, 0, err
}
width, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid width: %s", err)
}
height, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid height: %s", err)
}
return width, height, err
}
func main() {
fps := flag.Uint("fps", 30, "FPS")
winID := flag.Int("win", 0, "Window ID")
size := flag.String("size", "", "Canvas size in the format WxH in pixels. Defaults to the initial size of the captured window")
cfr := flag.Bool("cfr", false, "Use a constant frame rate")
_ = cfr
flag.Parse()
win := &Window{ID: *winID}
xu, err := xgbutil.NewConn()
if err != nil {
log.Fatal("Couldn't connect to X server:", err)
}
if err := composite.Init(xu.Conn()); err != nil {
log.Fatal("COMPOSITE extension is not available:", err)
}
if err := xfixes.Init(xu.Conn()); err != nil {
log.Fatal("XFIXES extension is not available:", err)
}
xfixes.QueryVersion(xu.Conn(), 1, 0)
if err := xshm.Init(xu.Conn()); err != nil {
// TODO(dh) implement a slower version that is not using SHM
log.Fatal("MIT-SHM extension is not available:", err)
}
if err := composite.RedirectWindowChecked(xu.Conn(), xproto.Window(win.ID), composite.RedirectAutomatic).Check(); err != nil {
if err, ok := err.(xproto.AccessError); ok {
log.Fatal("Can't capture window, another program seems to be capturing it already:", err)
}
log.Fatal("Can't capture window:", err)
}
pix, err := xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
segID, err := xshm.NewSegId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for SHM:", err)
}
// Register event before we query the window size for the first
// time. Otherwise we could race and miss a window resize.
err = xproto.ChangeWindowAttributesChecked(xu.Conn(), xproto.Window(win.ID),
xproto.CwEventMask, []uint32{uint32(xproto.EventMaskStructureNotify)}).Check()
if err != nil {
log.Fatal("Couldn't monitor window for size changes:", err)
}
geom, err := xproto.GetGeometry(xu.Conn(), xproto.Drawable(win.ID)).Reply()
if err != nil {
log.Fatal("Could not determine window dimensions:", err)
}
win.SetDimensions(int(geom.Width), int(geom.Height), int(geom.BorderWidth))
var canvas Canvas
if *size != "" {
width, height, err := parseSize(*size)
if err != nil {
log.Fatal(err)
}
canvas = Canvas{width, height}
} else {
canvas = Canvas{
Width: int(geom.Width),
Height: int(geom.Height),
}
}
buf, err := NewBuffer(canvas.Width*canvas.Height*bytesPerPixel, numPages)
if err != nil {
log.Fatal("Could not create shared memory:", err)
}
if err := xshm.AttachChecked(xu.Conn(), segID, uint32(buf.ShmID), false).Check(); err != nil {
log.Fatal("Could not attach shared memory to X server:", err)
}
i := 0
ch := make(chan Frame)
tags := map[string]string{
"DATE_RECORDED": time.Now().UTC().Format("2006-01-02 15:04:05.999"),
"WINDOW_ID": strconv.Itoa(win.ID),
}
vw := NewVideoWriter(canvas, int(*fps), *cfr, tags, os.Stdout)
if err := vw.Start(); err != nil {
log.Fatal("Couldn't write output:", err)
}
chistMu := &sync.Mutex{}
chist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
whist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
rhist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
var lastSlow time.Time
var slows uint64
go func() {
d := time.Second / time.Duration(*fps)
t := time.NewTicker(d)
start := time.Now()
dupped := 0
var prevFrameTime time.Time
first := true
for ts := range t.C | ()
el := NewEventLoop(xu.Conn())
res := NewResizeMonitor(el, win)
var other chan CaptureEvent
captureEvents := make(chan CaptureEvent, 1)
if *cfr {
other = make(chan CaptureEvent)
go func() {
for {
other <- CaptureEvent{}
}
}()
} else {
if err := damage.Init(xu.Conn()); err != nil {
// XXX fail back gracefully
log.Fatal(err)
}
damage.QueryVersion(xu.Conn(), 1, 1)
dmg := NewDamageMonitor(xu.Conn(), el, win, int(*fps))
other = dmg.C
}
go func() {
for {
var ev CaptureEvent
select {
case ev = <-res.C:
captureEvents <- ev
case ev = <-other:
captureEvents <- ev
}
}
}()
for ev := range captureEvents {
t := time.Now()
if ev.Resized {
// DRY
xproto.FreePixmap(xu.Conn(), pix)
var err error
pix, err = xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
}
w, h, bw := win.Dimensions()
offset := buf.PageOffset(i)
w = min(w, canvas.Width)
h = min(h, canvas.Height)
ts := time.Now()
_, err := xshm.GetImage(xu.Conn(), xproto.Drawable(pix), int16(bw), int16(bw), uint16(w), uint16(h), 0xFFFFFFFF, xproto.ImageFormatZPixmap, segID, uint32(offset)).Reply()
if err != nil {
continue
}
page := buf.Page(i)
if w < canvas.Width || h < canvas.Height {
i = (i + 1) % numPages
dest := buf.Page(i)
for i := range dest {
dest[i] = 0
}
for i := 0; i < h; i++ {
copy(dest[i*canvas.Width*bytesPerPixel:], page[i*w*bytesPerPixel:(i+1)*w*bytesPerPixel])
}
page = dest
}
drawCursor(xu, win, buf, page, canvas)
chistMu.Lock()
chist.RecordValue(int64(time.Since(t)))
chistMu.Unlock()
ch <- Frame{Data: page, Time: ts}
i = (i + 1) % numPages
}
}
func drawCursor(xu *xgbutil.XUtil, win *Window, buf Buffer, page []byte, canvas Canvas) {
// TODO(dh): We don't need to fetch the cursor image every time.
// We could listen to cursor notify events, fetch the cursor if we
// haven't seen it yet, then cache the cursor.
cursor, err := xfixes.GetCursorImage(xu.Conn()).Reply()
if err != nil {
return
}
pos, err := xproto.TranslateCoordinates(xu.Conn(), xu.RootWin(), xproto.Window(win.ID), cursor.X, cursor.Y).Reply()
if err != nil {
return
}
w, h, _ := win.Dimensions()
w = min(w, canvas.Width)
h = min(h, canvas.Height)
if pos.DstY < 0 || pos.DstX < 0 || int(pos.DstY) > h || int(pos.DstX) > w {
// cursor outside of our window
return
}
for i, p := range cursor.CursorImage {
row := i/int(cursor.Width) + int(pos.DstY) - int(cursor.Yhot)
col := i%int(cursor.Width) + int(pos.DstX) - int(cursor.Xhot)
if row >= canvas.Height || col >= canvas.Width || row < 0 || col < 0 {
// cursor is partially off-screen
break
}
off := row*canvas.Width*bytesPerPixel + col*bytesPerPixel
alpha := (p >> 24) + 1
invAlpha := 256 - (p >> 24)
page[off+3] = 255
page[off+2] = byte((alpha*uint32(byte(p>>16)) + invAlpha*uint32(page[off+2])) >> 8)
page[off+1] = byte((alpha*uint32(byte(p>>8)) + invAlpha*uint32(page[off+1])) >> 8)
page[off+0] = byte((alpha*uint32(byte(p>>0)) + invAlpha*uint32(page[off+0])) >> 8)
}
}
func roundDuration(d, m time.Duration) time.Duration {
if m <= 0 {
return d
}
r := d % m
if r < 0 {
r = -r
if r+r < m {
return d + r
}
if d1 := d - m + r; d1 < d {
return d1
}
return d // overflow
}
if r+r < m {
return d - r
}
if d1 := d + m - r; d1 > d {
return d1
}
return d // overflow
}
func milliseconds(di int64) float64 {
d := time.Duration(di)
sec := d / time.Millisecond
nsec := d % time.Millisecond
return float64(sec) + float64(nsec)*1e-6
}
| {
if rhist.TotalCount()%int64(*fps) == 0 {
chistMu.Lock()
var cbracket hdrhistogram.Bracket
var wbracket hdrhistogram.Bracket
var rbracket hdrhistogram.Bracket
brackets := chist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
cbracket = bracket
}
brackets = whist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
wbracket = bracket
}
brackets = rhist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
rbracket = bracket
}
s := "%d frames, %d dup, started recording %s ago\n" +
"capture latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"write latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"render loop min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"Last slowdown: %s (%d total)\n"
if !first {
s = "\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\r" + s
}
first = false
var dslow interface{}
if lastSlow.IsZero() {
dslow = "never"
} else {
dslow = time.Since(lastSlow).String() + " ago"
}
fmt.Fprintf(os.Stderr, s,
whist.TotalCount(), dupped, time.Since(start),
milliseconds(chist.Min()), milliseconds(chist.Max()), milliseconds(int64(chist.Mean())), milliseconds(int64(chist.StdDev())), cbracket.Quantile, milliseconds(cbracket.ValueAt),
milliseconds(whist.Min()), milliseconds(whist.Max()), milliseconds(int64(whist.Mean())), milliseconds(int64(whist.StdDev())), wbracket.Quantile, milliseconds(wbracket.ValueAt),
milliseconds(rhist.Min()), milliseconds(rhist.Max()), milliseconds(int64(rhist.Mean())), milliseconds(int64(rhist.StdDev())), rbracket.Quantile, milliseconds(rbracket.ValueAt),
dslow, slows)
chistMu.Unlock()
}
var err error
t := time.Now()
select {
case frame := <-ch:
err = vw.SendFrame(frame)
prevFrameTime = frame.Time
default:
dupped++
err = vw.SendFrame(Frame{Time: prevFrameTime.Add(d)})
prevFrameTime = prevFrameTime.Add(d)
}
whist.RecordCorrectedValue(int64(time.Since(t)), int64(d))
if err != nil {
log.Fatal("Couldn't write frame:", err)
}
dt := time.Since(ts)
if dt > d {
lastSlow = time.Now()
slows++
}
rhist.RecordCorrectedValue(int64(dt), int64(d))
}
} | conditional_block |
xcapture.go | package main
import (
"flag"
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"sync"
"time"
"unsafe"
"honnef.co/go/xcapture/internal/shm"
"github.com/BurntSushi/xgb"
"github.com/BurntSushi/xgb/composite"
"github.com/BurntSushi/xgb/damage"
xshm "github.com/BurntSushi/xgb/shm"
"github.com/BurntSushi/xgb/xfixes"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/codahale/hdrhistogram"
)
const bytesPerPixel = 4
const numPages = 4
func min(xs ...int) int {
if len(xs) == 0 {
return 0
}
m := xs[0]
for _, x := range xs[1:] {
if x < m {
m = x
}
}
return m
}
// TODO(dh): this definition of a window is specific to Linux. On
// Windows, for example, we wouldn't have an integer specifier for the
// window.
type Window struct {
ID int
mu sync.RWMutex
width int
height int
borderWidth int
}
func (w *Window) SetDimensions(width, height, border int) {
w.mu.Lock()
defer w.mu.Unlock()
w.width = width
w.height = height
w.borderWidth = border
}
func (w *Window) Dimensions() (width, height, border int) {
w.mu.RLock()
defer w.mu.RUnlock()
return w.width, w.height, w.borderWidth
}
type Canvas struct {
Width int
Height int
}
type Frame struct {
Data []byte
Time time.Time
}
type Buffer struct {
Pages int
PageSize int
Data []byte
ShmID int
}
func (b Buffer) PageOffset(idx int) int {
return b.PageSize * idx | size := b.PageSize
return b.Data[offset : offset+size : offset+size]
}
type BitmapInfoHeader struct {
Size uint32
Width int32
Height int32
Planes uint16
BitCount uint16
Compression [4]byte
SizeImage uint32
XPelsPerMeter int32
YPelsPerMeter int32
ClrUsed uint32
ClrImportant uint32
}
func NewBuffer(pageSize, pages int) (Buffer, error) {
size := pageSize * pages
seg, err := shm.Create(size)
if err != nil {
return Buffer{}, err
}
data, err := seg.Attach()
if err != nil {
return Buffer{}, err
}
sh := &reflect.SliceHeader{
Data: uintptr(data),
Len: size,
Cap: size,
}
b := (*(*[]byte)(unsafe.Pointer(sh)))
return Buffer{
Pages: pages,
PageSize: pageSize,
Data: b,
ShmID: seg.ID,
}, nil
}
type EventLoop struct {
conn *xgb.Conn
mu sync.RWMutex
listeners []chan xgb.Event
}
func NewEventLoop(conn *xgb.Conn) *EventLoop {
el := &EventLoop{conn: conn}
go el.start()
return el
}
func (el *EventLoop) Register(ch chan xgb.Event) {
el.mu.Lock()
defer el.mu.Unlock()
el.listeners = append(el.listeners, ch)
}
func (el *EventLoop) start() {
for {
ev, err := el.conn.WaitForEvent()
if err != nil {
continue
}
el.mu.RLock()
ls := el.listeners
el.mu.RUnlock()
for _, l := range ls {
l <- ev
}
}
}
type CaptureEvent struct {
Resized bool
}
type ResizeMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
win *Window
}
func NewResizeMonitor(el *EventLoop, win *Window) *ResizeMonitor {
res := &ResizeMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
win: win,
}
el.Register(res.elCh)
go res.start()
return res
}
func (res *ResizeMonitor) start() {
for ev := range res.elCh {
if ev, ok := ev.(xproto.ConfigureNotifyEvent); ok {
w, h, bw := res.win.Dimensions()
if int(ev.Width) != w || int(ev.Height) != h || int(ev.BorderWidth) != bw {
w, h, bw = int(ev.Width), int(ev.Height), int(ev.BorderWidth)
res.win.SetDimensions(w, h, bw)
select {
case res.C <- CaptureEvent{true}:
default:
}
}
}
}
}
type DamageMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
conn *xgb.Conn
fps int
win *Window
}
func NewDamageMonitor(conn *xgb.Conn, el *EventLoop, win *Window, fps int) *DamageMonitor {
dmg := &DamageMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
conn: conn,
fps: fps,
win: win,
}
el.Register(dmg.elCh)
go dmg.startDamage()
go dmg.startCursor()
return dmg
}
func (dmg *DamageMonitor) startDamage() {
xdmg, err := damage.NewDamageId(dmg.conn)
if err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.Create(dmg.conn, xdmg, xproto.Drawable(dmg.win.ID), damage.ReportLevelRawRectangles)
for ev := range dmg.elCh {
if _, ok := ev.(damage.NotifyEvent); ok {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func (dmg *DamageMonitor) startCursor() {
var prevCursor struct{ X, Y int }
prevInWindow := true
d := time.Second / time.Duration(dmg.fps)
t := time.NewTicker(d)
for range t.C {
cursor, err := xproto.QueryPointer(dmg.conn, xproto.Window(dmg.win.ID)).Reply()
if err != nil {
log.Println("Couldn't query cursor position:", err)
continue
}
c := struct{ X, Y int }{int(cursor.WinX), int(cursor.WinY)}
if c == prevCursor {
continue
}
prevCursor = c
damaged := false
w, h, _ := dmg.win.Dimensions()
if c.X < 0 || c.Y < 0 || c.X > w || c.Y > h {
if prevInWindow {
// cursor moved out of the window, which requires a redraw
damaged = true
}
prevInWindow = false
} else {
damaged = true
}
if damaged {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func parseSize(s string) (width, height int, err error) {
err = fmt.Errorf("%q is not a valid size specification", s)
if len(s) < 3 {
return 0, 0, err
}
parts := strings.Split(s, "x")
if len(parts) != 2 {
return 0, 0, err
}
width, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid width: %s", err)
}
height, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid height: %s", err)
}
return width, height, err
}
func main() {
fps := flag.Uint("fps", 30, "FPS")
winID := flag.Int("win", 0, "Window ID")
size := flag.String("size", "", "Canvas size in the format WxH in pixels. Defaults to the initial size of the captured window")
cfr := flag.Bool("cfr", false, "Use a constant frame rate")
_ = cfr
flag.Parse()
win := &Window{ID: *winID}
xu, err := xgbutil.NewConn()
if err != nil {
log.Fatal("Couldn't connect to X server:", err)
}
if err := composite.Init(xu.Conn()); err != nil {
log.Fatal("COMPOSITE extension is not available:", err)
}
if err := xfixes.Init(xu.Conn()); err != nil {
log.Fatal("XFIXES extension is not available:", err)
}
xfixes.QueryVersion(xu.Conn(), 1, 0)
if err := xshm.Init(xu.Conn()); err != nil {
// TODO(dh) implement a slower version that is not using SHM
log.Fatal("MIT-SHM extension is not available:", err)
}
if err := composite.RedirectWindowChecked(xu.Conn(), xproto.Window(win.ID), composite.RedirectAutomatic).Check(); err != nil {
if err, ok := err.(xproto.AccessError); ok {
log.Fatal("Can't capture window, another program seems to be capturing it already:", err)
}
log.Fatal("Can't capture window:", err)
}
pix, err := xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
segID, err := xshm.NewSegId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for SHM:", err)
}
// Register event before we query the window size for the first
// time. Otherwise we could race and miss a window resize.
err = xproto.ChangeWindowAttributesChecked(xu.Conn(), xproto.Window(win.ID),
xproto.CwEventMask, []uint32{uint32(xproto.EventMaskStructureNotify)}).Check()
if err != nil {
log.Fatal("Couldn't monitor window for size changes:", err)
}
geom, err := xproto.GetGeometry(xu.Conn(), xproto.Drawable(win.ID)).Reply()
if err != nil {
log.Fatal("Could not determine window dimensions:", err)
}
win.SetDimensions(int(geom.Width), int(geom.Height), int(geom.BorderWidth))
var canvas Canvas
if *size != "" {
width, height, err := parseSize(*size)
if err != nil {
log.Fatal(err)
}
canvas = Canvas{width, height}
} else {
canvas = Canvas{
Width: int(geom.Width),
Height: int(geom.Height),
}
}
buf, err := NewBuffer(canvas.Width*canvas.Height*bytesPerPixel, numPages)
if err != nil {
log.Fatal("Could not create shared memory:", err)
}
if err := xshm.AttachChecked(xu.Conn(), segID, uint32(buf.ShmID), false).Check(); err != nil {
log.Fatal("Could not attach shared memory to X server:", err)
}
i := 0
ch := make(chan Frame)
tags := map[string]string{
"DATE_RECORDED": time.Now().UTC().Format("2006-01-02 15:04:05.999"),
"WINDOW_ID": strconv.Itoa(win.ID),
}
vw := NewVideoWriter(canvas, int(*fps), *cfr, tags, os.Stdout)
if err := vw.Start(); err != nil {
log.Fatal("Couldn't write output:", err)
}
chistMu := &sync.Mutex{}
chist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
whist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
rhist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
var lastSlow time.Time
var slows uint64
go func() {
d := time.Second / time.Duration(*fps)
t := time.NewTicker(d)
start := time.Now()
dupped := 0
var prevFrameTime time.Time
first := true
for ts := range t.C {
if rhist.TotalCount()%int64(*fps) == 0 {
chistMu.Lock()
var cbracket hdrhistogram.Bracket
var wbracket hdrhistogram.Bracket
var rbracket hdrhistogram.Bracket
brackets := chist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
cbracket = bracket
}
brackets = whist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
wbracket = bracket
}
brackets = rhist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
rbracket = bracket
}
s := "%d frames, %d dup, started recording %s ago\n" +
"capture latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"write latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"render loop min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"Last slowdown: %s (%d total)\n"
if !first {
s = "\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\r" + s
}
first = false
var dslow interface{}
if lastSlow.IsZero() {
dslow = "never"
} else {
dslow = time.Since(lastSlow).String() + " ago"
}
fmt.Fprintf(os.Stderr, s,
whist.TotalCount(), dupped, time.Since(start),
milliseconds(chist.Min()), milliseconds(chist.Max()), milliseconds(int64(chist.Mean())), milliseconds(int64(chist.StdDev())), cbracket.Quantile, milliseconds(cbracket.ValueAt),
milliseconds(whist.Min()), milliseconds(whist.Max()), milliseconds(int64(whist.Mean())), milliseconds(int64(whist.StdDev())), wbracket.Quantile, milliseconds(wbracket.ValueAt),
milliseconds(rhist.Min()), milliseconds(rhist.Max()), milliseconds(int64(rhist.Mean())), milliseconds(int64(rhist.StdDev())), rbracket.Quantile, milliseconds(rbracket.ValueAt),
dslow, slows)
chistMu.Unlock()
}
var err error
t := time.Now()
select {
case frame := <-ch:
err = vw.SendFrame(frame)
prevFrameTime = frame.Time
default:
dupped++
err = vw.SendFrame(Frame{Time: prevFrameTime.Add(d)})
prevFrameTime = prevFrameTime.Add(d)
}
whist.RecordCorrectedValue(int64(time.Since(t)), int64(d))
if err != nil {
log.Fatal("Couldn't write frame:", err)
}
dt := time.Since(ts)
if dt > d {
lastSlow = time.Now()
slows++
}
rhist.RecordCorrectedValue(int64(dt), int64(d))
}
}()
el := NewEventLoop(xu.Conn())
res := NewResizeMonitor(el, win)
var other chan CaptureEvent
captureEvents := make(chan CaptureEvent, 1)
if *cfr {
other = make(chan CaptureEvent)
go func() {
for {
other <- CaptureEvent{}
}
}()
} else {
if err := damage.Init(xu.Conn()); err != nil {
// XXX fail back gracefully
log.Fatal(err)
}
damage.QueryVersion(xu.Conn(), 1, 1)
dmg := NewDamageMonitor(xu.Conn(), el, win, int(*fps))
other = dmg.C
}
go func() {
for {
var ev CaptureEvent
select {
case ev = <-res.C:
captureEvents <- ev
case ev = <-other:
captureEvents <- ev
}
}
}()
for ev := range captureEvents {
t := time.Now()
if ev.Resized {
// DRY
xproto.FreePixmap(xu.Conn(), pix)
var err error
pix, err = xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
}
w, h, bw := win.Dimensions()
offset := buf.PageOffset(i)
w = min(w, canvas.Width)
h = min(h, canvas.Height)
ts := time.Now()
_, err := xshm.GetImage(xu.Conn(), xproto.Drawable(pix), int16(bw), int16(bw), uint16(w), uint16(h), 0xFFFFFFFF, xproto.ImageFormatZPixmap, segID, uint32(offset)).Reply()
if err != nil {
continue
}
page := buf.Page(i)
if w < canvas.Width || h < canvas.Height {
i = (i + 1) % numPages
dest := buf.Page(i)
for i := range dest {
dest[i] = 0
}
for i := 0; i < h; i++ {
copy(dest[i*canvas.Width*bytesPerPixel:], page[i*w*bytesPerPixel:(i+1)*w*bytesPerPixel])
}
page = dest
}
drawCursor(xu, win, buf, page, canvas)
chistMu.Lock()
chist.RecordValue(int64(time.Since(t)))
chistMu.Unlock()
ch <- Frame{Data: page, Time: ts}
i = (i + 1) % numPages
}
}
func drawCursor(xu *xgbutil.XUtil, win *Window, buf Buffer, page []byte, canvas Canvas) {
// TODO(dh): We don't need to fetch the cursor image every time.
// We could listen to cursor notify events, fetch the cursor if we
// haven't seen it yet, then cache the cursor.
cursor, err := xfixes.GetCursorImage(xu.Conn()).Reply()
if err != nil {
return
}
pos, err := xproto.TranslateCoordinates(xu.Conn(), xu.RootWin(), xproto.Window(win.ID), cursor.X, cursor.Y).Reply()
if err != nil {
return
}
w, h, _ := win.Dimensions()
w = min(w, canvas.Width)
h = min(h, canvas.Height)
if pos.DstY < 0 || pos.DstX < 0 || int(pos.DstY) > h || int(pos.DstX) > w {
// cursor outside of our window
return
}
for i, p := range cursor.CursorImage {
row := i/int(cursor.Width) + int(pos.DstY) - int(cursor.Yhot)
col := i%int(cursor.Width) + int(pos.DstX) - int(cursor.Xhot)
if row >= canvas.Height || col >= canvas.Width || row < 0 || col < 0 {
// cursor is partially off-screen
break
}
off := row*canvas.Width*bytesPerPixel + col*bytesPerPixel
alpha := (p >> 24) + 1
invAlpha := 256 - (p >> 24)
page[off+3] = 255
page[off+2] = byte((alpha*uint32(byte(p>>16)) + invAlpha*uint32(page[off+2])) >> 8)
page[off+1] = byte((alpha*uint32(byte(p>>8)) + invAlpha*uint32(page[off+1])) >> 8)
page[off+0] = byte((alpha*uint32(byte(p>>0)) + invAlpha*uint32(page[off+0])) >> 8)
}
}
func roundDuration(d, m time.Duration) time.Duration {
if m <= 0 {
return d
}
r := d % m
if r < 0 {
r = -r
if r+r < m {
return d + r
}
if d1 := d - m + r; d1 < d {
return d1
}
return d // overflow
}
if r+r < m {
return d - r
}
if d1 := d + m - r; d1 > d {
return d1
}
return d // overflow
}
func milliseconds(di int64) float64 {
d := time.Duration(di)
sec := d / time.Millisecond
nsec := d % time.Millisecond
return float64(sec) + float64(nsec)*1e-6
} | }
func (b Buffer) Page(idx int) []byte {
offset := b.PageOffset(idx) | random_line_split |
xcapture.go | package main
import (
"flag"
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"sync"
"time"
"unsafe"
"honnef.co/go/xcapture/internal/shm"
"github.com/BurntSushi/xgb"
"github.com/BurntSushi/xgb/composite"
"github.com/BurntSushi/xgb/damage"
xshm "github.com/BurntSushi/xgb/shm"
"github.com/BurntSushi/xgb/xfixes"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/codahale/hdrhistogram"
)
const bytesPerPixel = 4
const numPages = 4
func | (xs ...int) int {
if len(xs) == 0 {
return 0
}
m := xs[0]
for _, x := range xs[1:] {
if x < m {
m = x
}
}
return m
}
// TODO(dh): this definition of a window is specific to Linux. On
// Windows, for example, we wouldn't have an integer specifier for the
// window.
type Window struct {
ID int
mu sync.RWMutex
width int
height int
borderWidth int
}
func (w *Window) SetDimensions(width, height, border int) {
w.mu.Lock()
defer w.mu.Unlock()
w.width = width
w.height = height
w.borderWidth = border
}
func (w *Window) Dimensions() (width, height, border int) {
w.mu.RLock()
defer w.mu.RUnlock()
return w.width, w.height, w.borderWidth
}
type Canvas struct {
Width int
Height int
}
type Frame struct {
Data []byte
Time time.Time
}
type Buffer struct {
Pages int
PageSize int
Data []byte
ShmID int
}
func (b Buffer) PageOffset(idx int) int {
return b.PageSize * idx
}
func (b Buffer) Page(idx int) []byte {
offset := b.PageOffset(idx)
size := b.PageSize
return b.Data[offset : offset+size : offset+size]
}
type BitmapInfoHeader struct {
Size uint32
Width int32
Height int32
Planes uint16
BitCount uint16
Compression [4]byte
SizeImage uint32
XPelsPerMeter int32
YPelsPerMeter int32
ClrUsed uint32
ClrImportant uint32
}
func NewBuffer(pageSize, pages int) (Buffer, error) {
size := pageSize * pages
seg, err := shm.Create(size)
if err != nil {
return Buffer{}, err
}
data, err := seg.Attach()
if err != nil {
return Buffer{}, err
}
sh := &reflect.SliceHeader{
Data: uintptr(data),
Len: size,
Cap: size,
}
b := (*(*[]byte)(unsafe.Pointer(sh)))
return Buffer{
Pages: pages,
PageSize: pageSize,
Data: b,
ShmID: seg.ID,
}, nil
}
type EventLoop struct {
conn *xgb.Conn
mu sync.RWMutex
listeners []chan xgb.Event
}
func NewEventLoop(conn *xgb.Conn) *EventLoop {
el := &EventLoop{conn: conn}
go el.start()
return el
}
func (el *EventLoop) Register(ch chan xgb.Event) {
el.mu.Lock()
defer el.mu.Unlock()
el.listeners = append(el.listeners, ch)
}
func (el *EventLoop) start() {
for {
ev, err := el.conn.WaitForEvent()
if err != nil {
continue
}
el.mu.RLock()
ls := el.listeners
el.mu.RUnlock()
for _, l := range ls {
l <- ev
}
}
}
type CaptureEvent struct {
Resized bool
}
type ResizeMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
win *Window
}
func NewResizeMonitor(el *EventLoop, win *Window) *ResizeMonitor {
res := &ResizeMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
win: win,
}
el.Register(res.elCh)
go res.start()
return res
}
func (res *ResizeMonitor) start() {
for ev := range res.elCh {
if ev, ok := ev.(xproto.ConfigureNotifyEvent); ok {
w, h, bw := res.win.Dimensions()
if int(ev.Width) != w || int(ev.Height) != h || int(ev.BorderWidth) != bw {
w, h, bw = int(ev.Width), int(ev.Height), int(ev.BorderWidth)
res.win.SetDimensions(w, h, bw)
select {
case res.C <- CaptureEvent{true}:
default:
}
}
}
}
}
type DamageMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
conn *xgb.Conn
fps int
win *Window
}
func NewDamageMonitor(conn *xgb.Conn, el *EventLoop, win *Window, fps int) *DamageMonitor {
dmg := &DamageMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
conn: conn,
fps: fps,
win: win,
}
el.Register(dmg.elCh)
go dmg.startDamage()
go dmg.startCursor()
return dmg
}
func (dmg *DamageMonitor) startDamage() {
xdmg, err := damage.NewDamageId(dmg.conn)
if err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.Create(dmg.conn, xdmg, xproto.Drawable(dmg.win.ID), damage.ReportLevelRawRectangles)
for ev := range dmg.elCh {
if _, ok := ev.(damage.NotifyEvent); ok {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func (dmg *DamageMonitor) startCursor() {
var prevCursor struct{ X, Y int }
prevInWindow := true
d := time.Second / time.Duration(dmg.fps)
t := time.NewTicker(d)
for range t.C {
cursor, err := xproto.QueryPointer(dmg.conn, xproto.Window(dmg.win.ID)).Reply()
if err != nil {
log.Println("Couldn't query cursor position:", err)
continue
}
c := struct{ X, Y int }{int(cursor.WinX), int(cursor.WinY)}
if c == prevCursor {
continue
}
prevCursor = c
damaged := false
w, h, _ := dmg.win.Dimensions()
if c.X < 0 || c.Y < 0 || c.X > w || c.Y > h {
if prevInWindow {
// cursor moved out of the window, which requires a redraw
damaged = true
}
prevInWindow = false
} else {
damaged = true
}
if damaged {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func parseSize(s string) (width, height int, err error) {
err = fmt.Errorf("%q is not a valid size specification", s)
if len(s) < 3 {
return 0, 0, err
}
parts := strings.Split(s, "x")
if len(parts) != 2 {
return 0, 0, err
}
width, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid width: %s", err)
}
height, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid height: %s", err)
}
return width, height, err
}
func main() {
fps := flag.Uint("fps", 30, "FPS")
winID := flag.Int("win", 0, "Window ID")
size := flag.String("size", "", "Canvas size in the format WxH in pixels. Defaults to the initial size of the captured window")
cfr := flag.Bool("cfr", false, "Use a constant frame rate")
_ = cfr
flag.Parse()
win := &Window{ID: *winID}
xu, err := xgbutil.NewConn()
if err != nil {
log.Fatal("Couldn't connect to X server:", err)
}
if err := composite.Init(xu.Conn()); err != nil {
log.Fatal("COMPOSITE extension is not available:", err)
}
if err := xfixes.Init(xu.Conn()); err != nil {
log.Fatal("XFIXES extension is not available:", err)
}
xfixes.QueryVersion(xu.Conn(), 1, 0)
if err := xshm.Init(xu.Conn()); err != nil {
// TODO(dh) implement a slower version that is not using SHM
log.Fatal("MIT-SHM extension is not available:", err)
}
if err := composite.RedirectWindowChecked(xu.Conn(), xproto.Window(win.ID), composite.RedirectAutomatic).Check(); err != nil {
if err, ok := err.(xproto.AccessError); ok {
log.Fatal("Can't capture window, another program seems to be capturing it already:", err)
}
log.Fatal("Can't capture window:", err)
}
pix, err := xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
segID, err := xshm.NewSegId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for SHM:", err)
}
// Register event before we query the window size for the first
// time. Otherwise we could race and miss a window resize.
err = xproto.ChangeWindowAttributesChecked(xu.Conn(), xproto.Window(win.ID),
xproto.CwEventMask, []uint32{uint32(xproto.EventMaskStructureNotify)}).Check()
if err != nil {
log.Fatal("Couldn't monitor window for size changes:", err)
}
geom, err := xproto.GetGeometry(xu.Conn(), xproto.Drawable(win.ID)).Reply()
if err != nil {
log.Fatal("Could not determine window dimensions:", err)
}
win.SetDimensions(int(geom.Width), int(geom.Height), int(geom.BorderWidth))
var canvas Canvas
if *size != "" {
width, height, err := parseSize(*size)
if err != nil {
log.Fatal(err)
}
canvas = Canvas{width, height}
} else {
canvas = Canvas{
Width: int(geom.Width),
Height: int(geom.Height),
}
}
buf, err := NewBuffer(canvas.Width*canvas.Height*bytesPerPixel, numPages)
if err != nil {
log.Fatal("Could not create shared memory:", err)
}
if err := xshm.AttachChecked(xu.Conn(), segID, uint32(buf.ShmID), false).Check(); err != nil {
log.Fatal("Could not attach shared memory to X server:", err)
}
i := 0
ch := make(chan Frame)
tags := map[string]string{
"DATE_RECORDED": time.Now().UTC().Format("2006-01-02 15:04:05.999"),
"WINDOW_ID": strconv.Itoa(win.ID),
}
vw := NewVideoWriter(canvas, int(*fps), *cfr, tags, os.Stdout)
if err := vw.Start(); err != nil {
log.Fatal("Couldn't write output:", err)
}
chistMu := &sync.Mutex{}
chist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
whist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
rhist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
var lastSlow time.Time
var slows uint64
go func() {
d := time.Second / time.Duration(*fps)
t := time.NewTicker(d)
start := time.Now()
dupped := 0
var prevFrameTime time.Time
first := true
for ts := range t.C {
if rhist.TotalCount()%int64(*fps) == 0 {
chistMu.Lock()
var cbracket hdrhistogram.Bracket
var wbracket hdrhistogram.Bracket
var rbracket hdrhistogram.Bracket
brackets := chist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
cbracket = bracket
}
brackets = whist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
wbracket = bracket
}
brackets = rhist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
rbracket = bracket
}
s := "%d frames, %d dup, started recording %s ago\n" +
"capture latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"write latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"render loop min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"Last slowdown: %s (%d total)\n"
if !first {
s = "\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\r" + s
}
first = false
var dslow interface{}
if lastSlow.IsZero() {
dslow = "never"
} else {
dslow = time.Since(lastSlow).String() + " ago"
}
fmt.Fprintf(os.Stderr, s,
whist.TotalCount(), dupped, time.Since(start),
milliseconds(chist.Min()), milliseconds(chist.Max()), milliseconds(int64(chist.Mean())), milliseconds(int64(chist.StdDev())), cbracket.Quantile, milliseconds(cbracket.ValueAt),
milliseconds(whist.Min()), milliseconds(whist.Max()), milliseconds(int64(whist.Mean())), milliseconds(int64(whist.StdDev())), wbracket.Quantile, milliseconds(wbracket.ValueAt),
milliseconds(rhist.Min()), milliseconds(rhist.Max()), milliseconds(int64(rhist.Mean())), milliseconds(int64(rhist.StdDev())), rbracket.Quantile, milliseconds(rbracket.ValueAt),
dslow, slows)
chistMu.Unlock()
}
var err error
t := time.Now()
select {
case frame := <-ch:
err = vw.SendFrame(frame)
prevFrameTime = frame.Time
default:
dupped++
err = vw.SendFrame(Frame{Time: prevFrameTime.Add(d)})
prevFrameTime = prevFrameTime.Add(d)
}
whist.RecordCorrectedValue(int64(time.Since(t)), int64(d))
if err != nil {
log.Fatal("Couldn't write frame:", err)
}
dt := time.Since(ts)
if dt > d {
lastSlow = time.Now()
slows++
}
rhist.RecordCorrectedValue(int64(dt), int64(d))
}
}()
el := NewEventLoop(xu.Conn())
res := NewResizeMonitor(el, win)
var other chan CaptureEvent
captureEvents := make(chan CaptureEvent, 1)
if *cfr {
other = make(chan CaptureEvent)
go func() {
for {
other <- CaptureEvent{}
}
}()
} else {
if err := damage.Init(xu.Conn()); err != nil {
// XXX fail back gracefully
log.Fatal(err)
}
damage.QueryVersion(xu.Conn(), 1, 1)
dmg := NewDamageMonitor(xu.Conn(), el, win, int(*fps))
other = dmg.C
}
go func() {
for {
var ev CaptureEvent
select {
case ev = <-res.C:
captureEvents <- ev
case ev = <-other:
captureEvents <- ev
}
}
}()
for ev := range captureEvents {
t := time.Now()
if ev.Resized {
// DRY
xproto.FreePixmap(xu.Conn(), pix)
var err error
pix, err = xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
}
w, h, bw := win.Dimensions()
offset := buf.PageOffset(i)
w = min(w, canvas.Width)
h = min(h, canvas.Height)
ts := time.Now()
_, err := xshm.GetImage(xu.Conn(), xproto.Drawable(pix), int16(bw), int16(bw), uint16(w), uint16(h), 0xFFFFFFFF, xproto.ImageFormatZPixmap, segID, uint32(offset)).Reply()
if err != nil {
continue
}
page := buf.Page(i)
if w < canvas.Width || h < canvas.Height {
i = (i + 1) % numPages
dest := buf.Page(i)
for i := range dest {
dest[i] = 0
}
for i := 0; i < h; i++ {
copy(dest[i*canvas.Width*bytesPerPixel:], page[i*w*bytesPerPixel:(i+1)*w*bytesPerPixel])
}
page = dest
}
drawCursor(xu, win, buf, page, canvas)
chistMu.Lock()
chist.RecordValue(int64(time.Since(t)))
chistMu.Unlock()
ch <- Frame{Data: page, Time: ts}
i = (i + 1) % numPages
}
}
func drawCursor(xu *xgbutil.XUtil, win *Window, buf Buffer, page []byte, canvas Canvas) {
// TODO(dh): We don't need to fetch the cursor image every time.
// We could listen to cursor notify events, fetch the cursor if we
// haven't seen it yet, then cache the cursor.
cursor, err := xfixes.GetCursorImage(xu.Conn()).Reply()
if err != nil {
return
}
pos, err := xproto.TranslateCoordinates(xu.Conn(), xu.RootWin(), xproto.Window(win.ID), cursor.X, cursor.Y).Reply()
if err != nil {
return
}
w, h, _ := win.Dimensions()
w = min(w, canvas.Width)
h = min(h, canvas.Height)
if pos.DstY < 0 || pos.DstX < 0 || int(pos.DstY) > h || int(pos.DstX) > w {
// cursor outside of our window
return
}
for i, p := range cursor.CursorImage {
row := i/int(cursor.Width) + int(pos.DstY) - int(cursor.Yhot)
col := i%int(cursor.Width) + int(pos.DstX) - int(cursor.Xhot)
if row >= canvas.Height || col >= canvas.Width || row < 0 || col < 0 {
// cursor is partially off-screen
break
}
off := row*canvas.Width*bytesPerPixel + col*bytesPerPixel
alpha := (p >> 24) + 1
invAlpha := 256 - (p >> 24)
page[off+3] = 255
page[off+2] = byte((alpha*uint32(byte(p>>16)) + invAlpha*uint32(page[off+2])) >> 8)
page[off+1] = byte((alpha*uint32(byte(p>>8)) + invAlpha*uint32(page[off+1])) >> 8)
page[off+0] = byte((alpha*uint32(byte(p>>0)) + invAlpha*uint32(page[off+0])) >> 8)
}
}
func roundDuration(d, m time.Duration) time.Duration {
if m <= 0 {
return d
}
r := d % m
if r < 0 {
r = -r
if r+r < m {
return d + r
}
if d1 := d - m + r; d1 < d {
return d1
}
return d // overflow
}
if r+r < m {
return d - r
}
if d1 := d + m - r; d1 > d {
return d1
}
return d // overflow
}
func milliseconds(di int64) float64 {
d := time.Duration(di)
sec := d / time.Millisecond
nsec := d % time.Millisecond
return float64(sec) + float64(nsec)*1e-6
}
| min | identifier_name |
converter.rs | use spirv_cross::{
glsl,
spirv,
};
use shaderc;
use std::{
iter,
path::{Path, PathBuf},
fs::File,
io::Read,
collections::HashMap,
};
use GlslVersion;
use Stage;
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicity used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn | () -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
_from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> {
let path = match (include_type, PathBuf::from(name).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let mut search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path
}))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert a HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
{
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.len() == 0 {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String>
where P: AsRef<Path>
{
source_paths.iter()
.filter_map(|path| {
let file_path = path.as_ref().join(name);
if file_path.exists() {
Some(file_path)
} else {
None
}
})
.next()
.ok_or_else(|| format!(
"unable to find shader file `{}` in search paths:\n{}",
name,
source_paths.iter()
.map(|path| format!(" * `{}`", path.as_ref().to_string_lossy()))
.collect::<Vec<_>>()
.join("\n"),
))
}
| default | identifier_name |
converter.rs | use spirv_cross::{
glsl,
spirv,
};
use shaderc;
use std::{
iter,
path::{Path, PathBuf},
fs::File,
io::Read,
collections::HashMap,
};
use GlslVersion;
use Stage;
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicity used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn default() -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
_from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> {
let path = match (include_type, PathBuf::from(name).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let mut search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path
}))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert a HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
|
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.len() == 0 {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String>
where P: AsRef<Path>
{
source_paths.iter()
.filter_map(|path| {
let file_path = path.as_ref().join(name);
if file_path.exists() {
Some(file_path)
} else {
None
}
})
.next()
.ok_or_else(|| format!(
"unable to find shader file `{}` in search paths:\n{}",
name,
source_paths.iter()
.map(|path| format!(" * `{}`", path.as_ref().to_string_lossy()))
.collect::<Vec<_>>()
.join("\n"),
))
}
| {
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
} | identifier_body |
converter.rs | use spirv_cross::{
glsl,
spirv,
};
use shaderc;
use std::{
iter,
path::{Path, PathBuf},
fs::File,
io::Read,
collections::HashMap,
};
use GlslVersion;
use Stage;
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicity used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn default() -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
_from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> { | }))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert a HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
{
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.len() == 0 {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String>
where P: AsRef<Path>
{
source_paths.iter()
.filter_map(|path| {
let file_path = path.as_ref().join(name);
if file_path.exists() {
Some(file_path)
} else {
None
}
})
.next()
.ok_or_else(|| format!(
"unable to find shader file `{}` in search paths:\n{}",
name,
source_paths.iter()
.map(|path| format!(" * `{}`", path.as_ref().to_string_lossy()))
.collect::<Vec<_>>()
.join("\n"),
))
} | let path = match (include_type, PathBuf::from(name).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let mut search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path | random_line_split |
controllers.js | angular.module('app.controllers', [])
/* 00-settings */
.controller('SettingsCtrl', function(
$scope,
$rootScope,
$state,
Dropbox,
Dialogs) {
console.log('```` Rendering Settings');
$rootScope.isWelcomePage = false;
$scope.confirmEvent = function() {
localStorage['event_folder'] = $rootScope.settings.event_folder;
EVENT_FOLDER = localStorage['event_folder'];
console.log(EVENT_FOLDER);
};
$scope.confirmDropboxToken = function() {
localStorage['dropbox_token'] = $rootScope.settings.dropbox_token;
DROPBOX_TOKEN = localStorage['dropbox_token'];
};
$scope.confirmMandrillEmail = function() {
localStorage['mandrill_email'] = $rootScope.settings.mandrill_email;
POSTMARK_EMAIL = localStorage['mandrill_email'];
};
$scope.confirmMandrillToken = function() {
localStorage['mandrill_token'] = $rootScope.settings.mandrill_token;
POSTMARK_TOKEN = localStorage['mandrill_token'];
};
$scope.confirmFacebookAppId = function() {
localStorage['facebook_app_id'] = $rootScope.settings.facebook_app_id;
FACEBOOK_APP_ID = localStorage['facebook_app_id'];
};
$scope.confirmTwitterApiKey = function() {
localStorage['twitter_api_key'] = $rootScope.settings.twitter_api_key;
TWITTER_API_KEY = localStorage['twitter_api_key'];
};
$scope.confirmTwitterSecretKey = function() {
localStorage['twitter_secret_key'] = $rootScope.settings.twitter_secret_key;
TWITTER_SECRET_KEY = localStorage['twitter_secret_key'];
};
$scope.back = function() {
Dropbox.getSettings(function(res) {
console.log(res);
EVENT_NAME = res.event_name;
WELCOME_BG = '/' + DROPBOX_FOLDER + '/' + getEventFolder() + '/src_img/welcome_bg.jpg';
$rootScope.msgToShare = res.share_comment;
// Dropbox.returnDirectLink(WELCOME_BG, function(d) {
$rootScope.backgroundBg = $rootScope.welcomeBg;
console.log($rootScope.backgroundBg); // √
$state.go('/01-welcome');
if (window.cordova) {
$timeout(function() {
$cordovaSplashscreen.hide();
}, 1000);
}
// });
}, function(err) {
console.log(err);
if (err === null) {
err = {};
err.error = 'No internet connection.';
}
/*
Cannot get settings file
– EVENT_FOLDER does not exist
– or, Internet connection lost
*/
Dialogs.alert('settings.json | ' + err.error, 'OK', function() {
$state.go('/00-settings');
});
});
};
})
/* 01-welcome */
.controller('WelcomeCtrl', function(
$scope,
$rootScope) {
console.log('```` Rendering Welcome');
$rootScope.backgroundPosX = 0;
$rootScope.isWelcomePage = true;
$rootScope.shouldHide = true;
})
/* 02-register */
.controller('RegisterCtrl', function(
$scope,
$rootScope,
$state,
$ionicViewSwitcher,
Dropbox,
Dialogs,
$ionicLoading,
Mandrill) {
console.log('```` Rendering Register');
$scope.user = {};
$rootScope.isWelcomePage = false;
$scope.cancel = function() {
$ionicViewSwitcher.nextDirection('back');
$rootScope.shouldHide = true;
$state.go('/01-welcome');
};
$scope.checkInput = function() {
// console.log('Check input fired');
var reName = /^[a-z ,.'-]+$/i;
var reEmail = /^(([^<>()[\]\\.,;:\s@"]+(\.[^<>()[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/;
var isNameOk = reName.test($scope.user.name);
var isEmailOk = reEmail.test($scope.user.email);
if (isNameOk && isEmailOk) {
$scope.isFormValid = true;
} else {
$scope.isFormValid = false;
}
};
$scope.register = function() {
// form validation
if ($scope.isFormValid) {
// Spinner
$ionicLoading.show({
animation: 'fade-in',
showBackdrop: true,
maxWidth: 200,
showDelay: 0
});
Mandrill.sendMail($scope.user.name, $scope.user.email, function() {
// Save a user
// find a file: users_iPadID.json
Dropbox.appendUser($scope.user.name, $scope.user.email, function() {
console.log('User saved.');
$rootScope.userToSend = $scope.user.name;
$rootScope.emailToSend = $scope.user.email;
});
// Load images
Dropbox.getImages(function() {
$rootScope.gallery = $rootScope.gallery.chunk(6);
$ionicViewSwitcher.nextDirection('forward');
$state.go('/03-gallery');
});
});
} else {
Dialogs.alert('One or more of your inputs is invalid. Please try again.', 'Got it', function() {});
}
};
})
/* 03-image-selection */
.controller('GalleryCtrl', function(
$scope,
$rootScope,
$cordovaOauth,
$cordovaFile,
$cordovaPrinter,
$cordovaSocialSharing,
$cordovaFileTransfer,
$timeout,
Mandrill,
ngDialog,
Dialogs,
$ionicLoading,
$ionicSlideBoxDelegate,
Dropbox) {
// imageLoaded
console.log('```` Rendering Gallery');
// $rootScope.backgroundPosX = -80; | animation: 'fade-in',
showBackdrop: true,
maxWidth: 200,
showDelay: 0
});
$ionicSlideBoxDelegate.slide(0, 500);
$timeout(function() {
Dropbox.getImages(function() {
$rootScope.gallery = $rootScope.gallery.chunk(6);
$ionicSlideBoxDelegate.update();
// Update Slide
$timeout(function() {
$ionicLoading.hide();
}, 1000);
});
}, 1000);
};
$scope.openImageModal = function(imgurl) {
console.log('open image modal');
IMG_TO_SHARE = imgurl;
$scope.imgToShare = IMG_TO_SHARE;
ngDialog.open({
template: 'views/imageModal.html',
scope: $scope,
controller: function($scope, $rootScope) {
console.log('you just selected %s', IMG_TO_SHARE);
/*** TWITTER LOGIN ***/
$scope.shareViaTwitter = function() {
if (window.cordova) {
window.cookies.clear(function() {
console.log('Cookies cleared!');
});
}
$cordovaOauth.twitter(TWITTER_API_KEY, TWITTER_SECRET_KEY)
.then(function(result) {
console.log('twitter login results: ' + JSON.stringify(result, null, '\t'));
/*example result object:
{ "oauth_token": "2795506425-A7gBaNkh1cKbNUKkivnjtldMVvbJ7AXlL4BdC4I",
"oauth_token_secret": "DLIy2ux3n2U4Aq6wcoSIiyNlm7KcEiEzFpNcbGMQwOyJh",
"user_id": "2795506425", "screen_name": "momentus_io" } */
$rootScope.socialToShare = 'Twitter';
$rootScope.twitter_token = result.oauth_token;
$rootScope.twitter_secret_token = result.oauth_token_secret;
$scope.closeThisDialog();
$rootScope.goToPage('/04-share');
}, function(error) {
console.log('twitter login error: ' + JSON.stringify(error));
$rootScope.isErrorSignIn = true;
Dialogs.alert('Unable to complete the sign-in process. Please try again.', 'Got it');
});
};
/*** FACEBOOK LOGIN ***/
$scope.shareViaFacebook = function() {
if (window.cordova) {
window.cookies.clear(function() {
console.log('Cookies cleared!');
});
}
$cordovaOauth.facebook(FACEBOOK_APP_ID, ['email', 'publish_actions'])
.then(function(result) {
console.log('fb login results: ' + JSON.stringify(result, null, '\t'));
$rootScope.socialToShare = 'Facebook';
$rootScope.fb_token = result.access_token;
$scope.closeThisDialog();
$rootScope.goToPage('/04-share');
}, function(error) {
console.log('error: ' + error);
$rootScope.isErrorSignIn = true;
Dialogs.alert('Unable to complete the sign-in process. Please try again.', 'Got it');
});
};
/*** MANDRILL ***/
$scope.shareViaEmail = function() {
console.log('Share via Email');
Dialogs.confirm('Do you want to share this photo to your registered email?', ['Cancel', 'Send Email'], function() {
// cancel
console.log('COMFIRM EMAIL SEND');
$scope.closeThisDialog();
}, function() {
// send email
// goto thank you
console.log('COMPILING TPL');
// console.log(EMAIL_TPL_PHOTO);
var compiled = _.template(EMAIL_TPL_PHOTO);
EMAIL_PHOTO_COMPILED = compiled({
'source_image': IMG_TO_SHARE
});
// console.log(EMAIL_TPL_PHOTO);
Mandrill.sharePhoto($rootScope.userToSend, $rootScope.emailToSend, function() {
$rootScope.socialToShare = 'Email';
// $scope.closeThisDialog();
// $rootScope.goToPage('/05-thankyou');
Dialogs.confirm('Your photo has been sent. Would you like to share another?', ['No, I\'m Finished', 'Share Again'], function() {
// No
$scope.closeThisDialog();
$rootScope.goToPage('/05-thankyou');
}, function() {
// Yes
$scope.closeThisDialog();
});
});
});
};
/*** AIR PRINT ***/
$scope.shareViaPrint = function() {
console.log('HIT PRINTER');
var page =
'<body style="margin: 0; padding: 0;"><div style="margin: 0; padding: 0px; position: absolute; top: 0px; left: 0px; width: 100%; height: 100%; background: url(' + $rootScope.overlayImg + ') no-repeat; background-size: cover; background-position: 50%;"><center><div style="position: relative; margin-top: 170px;"><img width="80%" src="' + IMG_TO_SHARE + '"></div></center></div></body>';
cordova.plugins.printer.print(page, 'Document.html', function() {
$rootScope.socialToShare = 'Print';
// $scope.closeThisDialog();
// $rootScope.goToPage('/05-thankyou');
Dialogs.confirm('Your photo has been sent to the printer. Would you like to share another?', ['No, I\'m Finished', 'Share Again'], function() {
// No
$scope.closeThisDialog();
$rootScope.goToPage('/05-thankyou');
}, function() {
// Yes
$scope.closeThisDialog();
});
});
}; // end shareViaPrint
} // end controller
}); // end ngDialog.open
}; // end openImageModal
})
/* 04-share */
.controller('ShareCtrl', function(
$scope,
$rootScope,
$ionicViewSwitcher,
$http,
$cordovaSocialSharing,
$cordovaFile,
Dialogs,
Dropbox,
$ionicLoading,
$state) {
console.log('```` Rendering Share');
// $rootScope.backgroundPosX = -120;
$scope.imgToShare = IMG_TO_SHARE;
$scope.back = function() {
$ionicViewSwitcher.nextDirection('back');
$state.go('/03-gallery');
};
$scope.postOnTwitter = function(msgtoshare) {
console.log("hit postOnTwitter.");
$ionicLoading.show({
animation: 'fade-in',
showBackdrop: true,
maxWidth: 200,
showDelay: 0
});
/** DOWNLOAD FILE FROM DROPBOX **/
Dropbox.downloadFile(IMG_TO_SHARE, 'share_img.jpg', function(e, result) {
if (e) return console.log("error downloading file.");
console.log("img download SUCCESS. result: \n" + JSON.stringify(result, null, '\t'));
$scope.localFile = result.nativeUrl;
/** READ FILE AS BASE64 STRING **/
$cordovaFile.readAsDataURL(cordova.file.documentsDirectory, 'share_img.jpg') //$cordovaFile.readAsBinaryString(cordova.file.documentsDirectory, 'downloadedImage.jpg')
.then(function(success) {
console.log(">> Finished Encoding File as base_64"); // console.log("readAsDataURL SUCCESS: "+JSON.stringify(success, null, '\t'));
$rootScope.codeBird.setToken(($rootScope.twitter_token).toString(), ($rootScope.twitter_secret_token).toString());
var base_64 = success.substr(success.indexOf(",") + 1, success.length); // var base_64 = success;
// var base_64_test_img = 'iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAB+0lEQVR42mP8//8/Ay0BEwONwagFoxZQDljI0PP8x7/Z93/e+PxXmpMpXp5dh4+ZgYHh0bd/clxYnMuINaMtfvRLgp3RVZwVU+rkuz+eRz+//wXVxcrEkKnEceXTX0dRlhoNTmKDaOvzXwHHv6x9+gtN/M9/hpjTX+GmMzAw/P7HMOnOj+ff//35x/Ds+z9iLfjPwPDt7//QE1/Sz319/RNh3PkPf+58+Yup/t7Xf9p8zFKcTMRa4CLGCrFm1v2fSjs+pJ/7uuvl7w+//yO7HRkUq3GEyrCREMk+kqy2IiyH3/xhYGD48uf/rPs/Z93/yczIwM3CiFU9Hw5xnD4ouvTt4Tf0AP37n+HTb+w+UOBmIs2CICm2R9/+EZlqGRkYzIVYSLMgRIYtUYGdSAsMBFgUuJhIy2iMDAwt2pysjAwLHv78RcgnOcrs5BQVHEyMG579Imi6Nh9zrBxZFgixMW624pXnwldYcTAzLjDhZmUit7AzE2K54c7fp8eF1QhWRobFptwmgiwkF3b//jMwMjJ8+P3/zPs/yx/9Wvr412+MgBJlZ1xsyuOOrbAibMHH3/87b32fce/nR2ypnpuFMVGevU6TQ5SdqKKeEVez5cuf/7te/j727s+9L/++/v3PzcyowM1kIcTiLs7Kz8pIfNnOONouGrVg1AIGAJ6gvN4J6V9GAAAAAElFTkSuQmCC'};
var params = {
'media': base_64
};
/** MEDIA_UPLOAD TO TWITTER **/
$rootScope.codeBird.__call(
'media_upload', params,
function(reply) {
if (reply.httpstatus == 200) {
/** STATUS_UPDATE TO TWITTER **/
console.log('media_upload reply: ' + JSON.stringify(reply, null, '\t'));
params = {
'media_ids': reply.media_id_string,
'status': msgtoshare
};
$rootScope.codeBird.__call(
'statuses_update', params,
function(statusUpdateReply) {
console.log('statuses-update reply: ' + JSON.stringify(statusUpdateReply, null, '\t'));
if (statusUpdateReply.httpstatus == 200) {
/** SUCCESS, DONE **/
$rootScope.goToPage('/05-thankyou');
} else Dialogs.alert('Unable to post to Twitter. Please try again or choose another sharing option.', 'Got it');
}
);
} else Dialogs.alert('Unable to post to Twitter. Please try again or choose another sharing option.', 'Got it');
}
);
}, function(error) {
console.log("readAsDataURL ERROR: " + JSON.stringify(error));
});
});
};
$scope.postOnFb = function(msgtoshare) {
$ionicLoading.show({
animation: 'fade-in',
showBackdrop: true,
maxWidth: 200,
showDelay: 0
});
$http.get('https://graph.facebook.com/v2.2/me?access_token=' + $rootScope.fb_token.toString())
.success(function(data, status, headers, config) {
console.log("get headers: " + JSON.stringify(headers));
console.log("get user data: " + JSON.stringify(data));
var user_id = data.id;
// var msgToPost = $rootScope.msgToShare;
// var photoToPost = "http://itchmo.com/wp-content/uploads/2007/06/p48118p.jpg";
// console.log(msgtoshare);
var msgWebSafe = escape(msgtoshare)
.replace(/\@/g, '%40')
.replace(/\*/g, '%2A')
.replace(/\//g, '%2F')
.replace(/\+/g, '%2B');
var postURL = 'https://graph.facebook.com/v2.2/' + user_id + '/photos?access_token=' + $rootScope.fb_token + '&url=' + IMG_TO_SHARE + '&message=' + msgWebSafe;
console.log(postURL);
$http({
method: "POST",
url: postURL
}).
success(function(data) {
// alert("POST SUCCESSFUL");
console.log("success on POST: " + JSON.stringify(data));
$rootScope.goToPage('/05-thankyou');
}).
error(function(data) {
console.log("error on POST: " + JSON.stringify(data));
Dialogs.alert('Unable to post a photo on your Facebook. Please try again or choose another sharing option.', 'Got it');
});
}).
error(function(data, status, headers, config) {
console.log("error data: " + JSON.stringify(data));
console.log("error status: " + status);
});
};
})
/* 05-thankyou */
.controller('ThankYouCtrl', function(
$scope,
$rootScope,
$timeout,
$state) {
console.log('```` Rendering ThankYou');
// $rootScope.backgroundPosX = -160;
$timeout(function() {
PREV_NOW = new Date().getTime();
$rootScope.startOver();
}, 10000);
}); |
$scope.refreshGallery = function() {
console.log('refreshing gallery');
$ionicLoading.show({ | random_line_split |
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() -> ! {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: This was what was tested and seemed like a 1.5mbaud rate??
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit()); |
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clean INRQ bit to enter normal mode (Must wait for INAK to be cleared after 11 recessive cycles on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data im empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RWCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_Tsr for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR rigster indicates messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs1 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler = 24 (24 + 1)
// t_q = 0.5us
// t_bs1 = 9 (8 + 1)
// t_bs2 = 6 (5 + 1)
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers | random_line_split | |
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() -> ! |
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clean INRQ bit to enter normal mode (Must wait for INAK to be cleared after 11 recessive cycles on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data im empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RWCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_Tsr for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR rigster indicates messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs1 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler = 24 (24 + 1)
// t_q = 0.5us
// t_bs1 = 9 (8 + 1)
// t_bs2 = 6 (5 + 1)
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers
| {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: This was what was tested and seemed like a 1.5mbaud rate??
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
} | identifier_body |
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() -> ! {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: This was what was tested and seemed like a 1.5mbaud rate??
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() |
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clean INRQ bit to enter normal mode (Must wait for INAK to be cleared after 11 recessive cycles on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data im empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RWCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_Tsr for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR rigster indicates messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs1 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler = 24 (24 + 1)
// t_q = 0.5us
// t_bs1 = 9 (8 + 1)
// t_bs2 = 6 (5 + 1)
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers
| {
break;
} | conditional_block |
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() -> ! {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: This was what was tested and seemed like a 1.5mbaud rate??
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn | <X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clean INRQ bit to enter normal mode (Must wait for INAK to be cleared after 11 recessive cycles on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data im empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RWCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_Tsr for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR rigster indicates messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs1 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler = 24 (24 + 1)
// t_q = 0.5us
// t_bs1 = 9 (8 + 1)
// t_bs2 = 6 (5 + 1)
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers
| configure | identifier_name |
cfg.py | # cfg.py
HOST = "irc.chat.twitch.tv" # the Twitch IRC server
PORT = 6667 # always use port 6667
NICK = "lautrecbot" # your Twitch username, lowercase
PASS = "REDACTED" # your Twitch OAuth token
CHAN = "#REDACTED" # the channel you want to join
RATE = (100/30) #messages per second
NUM_MESSAGES = 200 #number of chat messages between Lautrec messages
PUNCTUATION = ['.','!','?', ',']
LAUTREC = [
'Oh, still human are you?',
'Then I am in luck. Could you help me?',
'As you can see I am stuck, without recourse.',
'Please, I have duties to fulfil, and I will reward you handsomely.',
'Well? I am certain you stand to benefit.',
'Why! You!',
'Do not run away! Hear me out!',
'Ahh, you have come back.',
'I beg of you. Help me.',
'Thank you, yes, sincerely.',
'I am Knight Lautrec of Carim.',
'I truly appreciate this, and I guarantee a reward, only later.',
'Yes, very sorry, but your reward will have to wait.',
'I have just been freed. Allow me some time.',
'I am free. Now I can get back to work…',
'Keh heh heh heh…',
'Well, what have we here?',
'Keh keh keh. Are you sure about this?',
'When attacked and made hostile',
'You leave me no choice.',
'I was once grateful to you.',
'But if this is our fate, so be it!',
'You despicable…',
'Curses… How could I…',
'Ahh, hello there…',
'I have your reward. Please accept it.',
'I am grateful to you for freeing me.',
'Keh heh heh heh…',
'… Not enough for you? Well, let\'s not be greedy, now…',
'Keh heh heh heh…',
'Hello… I don\'t think we\'ve met.',
'I am Knight Lautrec of Carim.',
'We are both Undead. Perhaps we can help one another.',
'Keh heh heh heh…',
'Hmm, what business do you have?',
'If you have none, then stay silent.',
'…You…',
'How dare you come prancing about!',
'I have nothing to say. Be gone from my sight.',
'Ah, you certainly are keeping busy.',
'Care to pay for a useful tip?',
'A wise choice, indeed.',
'Oh, really?',
'Well, suit yourself. Only trying to help.',
'Hm? That tip I gave you? Ahh, I heard it from a fleeing old man.',
'That poor bastard! All his robes and trinkets won\'t help him now!',
'Kwah hah hah hah hah!',
'Have you heard of Trusty Patches?',
'If ever a man has rubbed me up the wrong way, ugh!',
'If he ever comes around again, I swear, I\'ll have his hide.',
'By the lords… Your face…',
'Hmm… Your humanity is really slipping.',
'But there are methods. Most fools have more humanity than they know what to do with.',
'Now, who do you imagine will make the best use of it, hmm?',
'Well, where have you been?',
'I am glad to see you are safe.',
'Oh, hello.',
'I\'m considering a change of location…',
'I have a rather, pressing matter to attend to up above.',
'That Keeper has served me well, but… enough with her…',
'Keh heh heh heh…',
'Hm, you again? What is it?',
'Our futures are murky. Let\'s not be too friendly, now',
'Well, look at you.',
'I thought you were wiser, but I thought wrong!',
'Tis a terrible pity. Like a moth flittering towards a flame.',
'You fellows? No? Don\'t you agree?',
'When you enter his aggro range, after the first time',
'So, here we go again!',
'How many times will these lambs rush to slaughter?',
'Well, let\'s get it over with.'
]
NOUNS = [
'enemy',
'monster',
'mob enemy',
'tough enemy',
'critical foe',
'hollow',
'pilgrim',
'prisoner',
'monstrosity',
'skeleton',
'ghost',
'beast',
'lizard',
'bug',
'grub',
'crab',
'dwarf',
'giant',
'demon',
'dragon',
'knight',
'sellword',
'warrior',
'herald',
'bandit',
'assassin',
'sorcerer',
'pyromancer',
'cleric',
'deprived',
'sniper',
'duo',
'trio',
'you',
'you bastard',
'good fellow',
'saint',
'wretch',
'charmer',
'poor soul',
'oddball',
'nimble one',
'laggard',
'moneybags',
'beggar',
'miscreant',
'liar',
'fatty',
'beanpole',
'youth',
'elder',
'old codger',
'old dear',
'merchant',
'artisan',
'master',
'sage',
'champion',
'lord of cinder',
'king',
'queen',
'prince',
'princess',
'angel',
'god',
'friend',
'ally',
'spouse',
'covenantor',
'phantom',
'dark spirit',
'bonfire',
'ember',
'fog wall',
'lever',
'contraption',
'key',
'trap',
'torch',
'door',
'treasure',
'chest',
'something',
'quite something',
'rubbish',
'filth',
'weapon',
'shield',
'projectile',
'armor',
'item',
'ring',
'ore',
'coal',
'transposing kiln',
'scroll',
'umbral ash',
'throne',
'rite',
'coffin',
'cinder',
'ash',
'moon',
'eye',
'brew',
'soup',
'message',
'bloodstain',
'illusion',
'close-ranged battle',
'ranged battle',
'eliminating one at a time',
'luring it out',
'beating to a pulp',
'ambush',
'pincer attack',
'hitting them in one swoop',
'duel-wielding',
'stealth',
'mimicry',
'fleeing',
'charging',
'jumping off',
'dashing through',
'circling around',
'trapping inside',
'rescue',
'skill',
'sorcery',
'pyromancy',
'miracles',
'pure luck',
'prudence',
'brief respite',
'play dead',
'jog',
'dash',
'rolling',
'backstepping',
'jumping',
'attacking',
'jump attack',
'dash attack',
'counter attack',
'stabbing in the back',
'guard stun & stab',
'plunging attack',
'shield breaking',
'blocking',
'parrying',
'locking-on',
'no lock-on',
'two-handing',
'gesture',
'control',
'destroy',
'boulder',
'lava',
'poison gas',
'enemy horde',
'forest',
'swamp',
'cave',
'shortcut',
'detour',
'hidden path',
'secret passage',
'dead end',
'labyrinth',
'hole',
'bright spot',
'dark spot',
'open area',
'tight spot',
'safe zone',
'danger zone',
'sniper spot',
'hiding place',
'illusory wall',
'ladder',
'lift',
'gorgeous view',
'looking away',
'overconfidence',
'slip-up',
'oversight',
'fatigue',
'bad luck',
'inattention',
'loss of stamina',
'chance encounter',
'planned encounter',
'front',
'back',
'left',
'right',
'up',
'down',
'below',
'above',
'behind',
'head',
'neck',
'stomach',
'back',
'armor',
'finger',
'leg',
'rear',
'tail',
'wings',
'anywhere',
'tongue',
'right arm',
'left arm',
'thumb',
'indexfinger',
'longfinger',
'ringfinger',
'smallfinger',
'right leg',
'left leg',
'right side',
'left side',
'pincer',
'wheel',
'core',
'mount',
'regular',
'strike',
'thrust',
'slash',
'magic',
'crystal',
'fire',
'chaos',
'lightning',
'blessing',
'dark',
'critical hits',
'bleeding',
'poison',
'toxic',
'frost',
'curse',
'equipment breakage',
'chance',
'quagmire',
'hint',
'secret',
'sleeptalk',
'happiness',
'misfortune',
'life',
'death',
'demise',
'joy',
'fury',
'agony',
'sadness',
'tears',
'loyalty',
'betrayal',
'hope',
'despair',
'fear',
'losing sanity',
'victory',
'defeat',
'sacrifice',
'light',
'dark',
'bravery',
'confidence',
'vigor',
'revenge',
'resignation',
'overwhelming',
'regret',
'pointless',
'man',
'woman',
'friendship',
'love',
'recklessness',
'composure',
'guts',
'comfort',
'silence',
'deep',
'dregs',
'good luck',
'fine work',
'i did it',
'i\'ve failed',
'here',
'not here',
'i can\'t take this',
'lonely',
'don\'t you dare',
'do it',
'look carefully',
'listen carefully',
'think carefully',
'this place again',
'now the real fight begins',
'you don\'t deserve this',
'keep moving',
'pull back',
'give it up',
'don\'t give up',
'help me',
'impossible',
'bloody expensive',
'let me out of here',
'stay calm',
'like a dream',
'seems familiar',
'are you ready',
'it\'ll happen to you too',
'praise the sun',
'may the flames guide thee',
'you\'ve come to the right place',
'bless us with blood',
'may the good blood guide your way',
'fear your blindness',
'the sky and the cosmos are one',
'let us cleanse these foul streets',
'you\'re in the know right',
'oh i can\'t wait hee hee',
'take a step forward',
'turn back',
'those with faith will be spared',
'don\'t be fooled',
'pitiful really',
'behind you',
'don\'t you dare look at me',
'sincerest thanks',
'a hunter is never alone',
'please carry on in my stead',
'run',
'man-beast',
'giant beast',
'abhorrent beast',
'infected one',
'foe',
'strong foe',
'giant foe',
'terrible foe',
'hound',
'bird',
'snake',
'animal',
'insect',
'watcher',
'shaman',
'dead',
'foul spirit',
'the lost',
'malformed thing',
'unknown thing',
'slimy thing',
'blobby thing',
'kin of the cosmos',
'evil eye',
'false god',
'superior being',
'messenger',
'doll',
'elderly',
'ailing one',
'madman',
'keeper',
'mob',
'wheelchair',
'small gent',
'small lady',
'titan',
'amazon',
'dullard',
'scoundrel',
'child',
'darling',
'infant',
'yourself',
'hunter',
'cooperator',
'adversary',
'executioner',
'vileblood',
'hunter of hunters',
'blood-addled hunter',
'physical attack',
'blunt attack',
'thrust attack',
'blood attack',
'arcane',
'bolt',
'quick weapon',
'long weapon',
'frenzy',
'exploiting species',
'beast transformation',
'firearm',
'blunderbuss',
'rally',
'charge attack',
'visceral attack',
'quickstep',
'blood vial',
'quicksilver bullet',
'medicine',
'special medicine',
'oil',
'coarse paper',
'special item',
'\"focus on attacks\"',
'sneak attack',
'patrol',
'reinforcements caller',
'\"focus on evasion\"',
'\"focus on healing\"',
'\"close-range fight\"',
'\"long-range fight\"',
'\"hit-and-run\"',
'sniping',
'counter',
'\"attack from behind\"',
'\"open when attacking\"',
'\"strike and be struck\"',
'\"kill in order\"',
'\"kill first\"',
'charging forth',
'lure',
'ignoring',
'retreat',
'use of terrain',
'high spot',
'fall',
'alertness',
'unbreakable will',
'leaden constitution',
'blood echoes',
'insight',
'bloodstone',
'blood gem',
'rune',
'ritual material',
'paleblood',
'rating',
'dead body',
'statue',
'footing',
'yharnam',
'clinic',
'grand cathedral',
'church',
'safe place',
'old labyrinth',
'workshop',
'healing church',
'unseen village',
'hunting',
'night',
'dawn',
'blood',
'warm blood',
'scourge',
| 'ritual',
'contact',
'encounter',
'evolution',
'oath',
'corruption',
'execution',
'cleansing',
'prayer',
'defilement',
'sinister',
'courage',
'respect',
'inquisitiveness',
'pity',
'grief',
'wrath',
'sanity',
'madness',
'fervor',
'seduction',
'feasting',
'tastiness',
'tonsil',
'metamorphosis',
'common sense',
'darkness',
'singing',
'sobbing',
'howling',
'\"all\'s well\"',
'the unseen',
'all'
]
TEMPLATES = [
'no **** ahead',
'be wary of ****',
'try ****',
'could this be a ****',
'if only i had a ****',
'could this be an ****',
'if only i had an ****',
'visions of ****',
'time for ****',
'huh it\'s a ****',
'huh it\'s an ****',
'praise the ****',
'let there be ****',
'ahh ****',
'**** waits ahead',
'beware of ****',
'it\'s the scourge of ****',
'fear ****',
'need ****',
'reeks of ****',
'remember ****',
'despicable ****',
'woeful ****',
'wondrous ****',
'weakness: ****',
'you must accept ****',
'treat **** with care',
'nothing but **** here',
'it is all thanks to ****',
'imminent ****',
'have mercy ****',
'no mercy for ****',
'have audience with ****',
'reminiscent of ****',
'hurrah for ****',
'oh ****',
'**** is effective',
'**** required ahead',
'**** ahead',
'****'
]
CONJUNCTIONS = [
' and then ',
' therefore ',
' eventually ',
' in short ',
' or ',
' by the way ',
' so to speak ',
' all the more ',
' however '
] | 'nightmare',
'cosmos',
'oedon',
'communion',
'donation',
| random_line_split |
automaton.go | package automaton
import (
"fmt"
"github.com/bits-and-blooms/bitset"
"github.com/geange/lucene-go/core/util"
"sort"
)
// Automaton Represents an automaton and all its states and transitions. States are integers and must be
// created using createState. Mark a state as an accept state using setAccept. Add transitions using
// addTransition. Each state must have all of its transitions added at once; if this is too restrictive
// then use Automaton.Builder instead. State 0 is always the initial state. Once a state is finished,
// either because you've starting adding transitions to another state or you call finishState, then that
// states transitions are sorted (first by min, then max, then dest) and reduced (transitions with adjacent
// labels going to the same dest are combined).
type Automaton struct {
// Where we next write to the int[] states; this increments by 2 for each added state because we
// pack a pointer to the transitions array and a count of how many transitions leave the state.
nextState int
// Where we next write to in int[] transitions; this increments by 3 for each added transition because
// we pack min, max, dest in sequence.
nextTransition int
// Current state we are adding transitions to; the caller must add all transitions for this state
// before moving onto another state.
curState int
// Index in the transitions array, where this states leaving transitions are stored, or -1
// if this state has not added any transitions yet, followed by number of transitions.
states []int
isAccept *bitset.BitSet
// Holds toState, min, max for each transition.
transitions []int
// True if no state has two transitions leaving with the same label.
deterministic bool
}
func NewAutomaton() *Automaton {
return NewAutomatonV1(2, 2)
}
func NewAutomatonV1(numStates, numTransitions int) *Automaton {
return &Automaton{
curState: -1,
deterministic: true,
states: make([]int, numStates*2),
isAccept: bitset.New(uint(numStates)),
transitions: make([]int, numTransitions*3),
}
}
// CreateState Create a new state.
func (r *Automaton) CreateState() int {
r.growStates()
state := r.nextState / 2
r.states[r.nextState] = -1
r.nextState += 2
return state
}
// SetAccept Set or clear this state as an accept state.
func (r *Automaton) SetAccept(state int, accept bool) {
r.isAccept.SetTo(uint(state), accept)
}
// Sugar to get all transitions for all states. This is object-heavy; it's better to iterate state by state instead.
func (r *Automaton) getSortedTransitions() [][]Transition {
numStates := r.GetNumStates()
transitions := make([][]Transition, numStates)
for s := 0; s < numStates; s++ {
numTransitions := r.GetNumTransitionsWithState(s)
transitions[s] = make([]Transition, numTransitions)
for t := 0; t < numTransitions; t++ {
transition := Transition{}
r.getTransition(s, t, &transition)
transitions[s][t] = transition
}
}
return transitions
}
// Returns accept states. If the bit is set then that state is an accept state.
func (r *Automaton) getAcceptStates() *bitset.BitSet {
return r.isAccept
}
// IsAccept Returns true if this state is an accept state.
func (r *Automaton) IsAccept(state int) bool {
return r.isAccept.Test(uint(state))
}
// AddTransitionLabel Add a new transition with min = max = label.
func (r *Automaton) AddTransitionLabel(source, dest, label int) error {
return r.AddTransition(source, dest, label, label)
}
// AddTransition Add a new transition with the specified source, dest, min, max.
func (r *Automaton) AddTransition(source, dest, min, max int) error {
//bounds := r.nextState / 2
r.growTransitions()
if r.curState != source {
if r.curState != -1 {
r.finishCurrentState()
}
// Move to next source:
r.curState = source
if r.states[2*r.curState] != -1 {
return fmt.Errorf("from state (%d) already had transitions added", source)
}
r.states[2*r.curState] = r.nextTransition
}
r.transitions[r.nextTransition] = dest
r.nextTransition++
r.transitions[r.nextTransition] = min
r.nextTransition++
r.transitions[r.nextTransition] = max
r.nextTransition++
// Increment transition count for this state
r.states[2*r.curState+1]++
return nil
}
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
func (r *Automaton) AddEpsilon(source, dest int) {
t := Transition{}
count := r.InitTransition(dest, &t)
for i := 0; i < count; i++ {
r.GetNextTransition(&t)
_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
}
if r.IsAccept(dest) {
r.SetAccept(source, true)
}
}
// Copy Copies over all states/transitions from other. The states numbers are sequentially assigned (appended).
func (r *Automaton) Copy(other *Automaton) {
// Bulk copy and then fixup the state pointers:
stateOffset := r.GetNumStates()
r.states = util.Grow(r.states, r.nextState+other.nextState)
copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
for i := 0; i < other.nextState; i += 2 {
if r.states[r.nextState+i] != -1 {
r.states[r.nextState+i] += r.nextTransition
}
}
r.nextState += other.nextState
otherNumStates := other.GetNumStates()
otherAcceptStates := other.getAcceptStates()
state := uint(0)
for {
if state < uint(otherNumStates) {
if state, ok := otherAcceptStates.NextSet(state); ok {
r.SetAccept(stateOffset+int(state), true)
state++
continue
}
}
break
}
// Bulk copy and then fixup dest for each transition:
r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
for i := 0; i < other.nextTransition; i += 3 {
r.transitions[r.nextTransition+i] += stateOffset
}
r.nextTransition += other.nextTransition
if other.deterministic == false {
r.deterministic = false
}
}
// Freezes the last state, sorting and reducing the transitions.
func (r *Automaton) finishCurrentState() {
numTransitions := r.states[2*r.curState+1]
offset := r.states[2*r.curState]
start := offset / 3
sort.Sort(&destMinMaxSorter{
from: start,
to: start + numTransitions,
Automaton: r,
})
// Reduce any "adjacent" transitions:
upto := 0
minValue := -1
maxValue := -1
dest := -1
for i := 0; i < numTransitions; i++ {
tDest := r.transitions[offset+3*i]
tMin := r.transitions[offset+3*i+1]
tMax := r.transitions[offset+3*i+2]
if dest == tDest {
if tMin <= maxValue+1 {
if tMax > maxValue {
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
minValue = tMin
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
dest = tDest
minValue = tMin
maxValue = tMax
}
}
if dest != -1 {
// Last transition
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
r.nextTransition -= (numTransitions - upto) * 3
r.states[2*r.curState+1] = upto
// Sort transitions by minValue/maxValue/dest:
sort.Sort(&minMaxDestSorter{
from: start,
to: start + upto,
Automaton: r,
})
if r.deterministic && upto > 1 {
lastMax := r.transitions[offset+2]
for i := 1; i < upto; i++ {
minValue = r.transitions[offset+3*i+1]
if minValue <= lastMax {
r.deterministic = false
break
}
lastMax = r.transitions[offset+3*i+2]
}
}
}
// IsDeterministic Returns true if this automaton is deterministic (for ever state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
return r.deterministic
}
// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to this method yourself.
func (r *Automaton) finishState() {
if r.curState != -1 {
r.finishCurrentState()
r.curState = -1
}
}
// GetNumStates How many states this automaton has.
func (r *Automaton) GetNumStates() int {
return r.nextState / 2
}
// GetNumTransitions How many transitions this automaton has.
func (r *Automaton) GetNumTransitions() int {
return r.nextTransition / 3
}
// GetNumTransitionsWithState How many transitions this state has.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
count := r.states[2*state+1]
if count == -1 {
return 0
}
return count
}
func (r *Automaton) growStates() |
func (r *Automaton) growTransitions() {
if r.nextTransition+3 > len(r.transitions) {
r.transitions = util.Grow(r.transitions, r.nextTransition+3)
}
}
// Sorts transitions by dest, ascending, then min label ascending, then max label ascending
type destMinMaxSorter struct {
from, to int
*Automaton
}
func (r *destMinMaxSorter) Len() int {
return r.to - r.from
}
func (r *destMinMaxSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
// First dest:
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
// Then min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax {
return false
}
return false
}
func (r *destMinMaxSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *destMinMaxSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// Sorts transitions by min label, ascending, then max label ascending, then dest ascending
type minMaxDestSorter struct {
from, to int
*Automaton
}
func (r *minMaxDestSorter) Len() int {
return r.to - r.from
}
func (r *minMaxDestSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
// First min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax {
return false
}
// Then dest:
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
return false
}
func (r *minMaxDestSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *minMaxDestSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// InitTransition Initialize the provided Transition to iterate through all transitions leaving the specified
// state. You must call GetNextTransition to get each transition. Returns the number of transitions leaving
// this state.
func (r *Automaton) InitTransition(state int, t *Transition) int {
t.Source = state
t.TransitionUpto = r.states[2*state]
return r.GetNumTransitionsWithState(state)
}
// GetNextTransition Iterate to the next transition after the provided one
func (r *Automaton) GetNextTransition(t *Transition) {
t.Dest = r.transitions[t.TransitionUpto]
t.TransitionUpto++
t.Min = r.transitions[t.TransitionUpto]
t.TransitionUpto++
t.Max = r.transitions[t.TransitionUpto]
t.TransitionUpto++
}
func (r *Automaton) transitionSorted(t *Transition) bool {
upto := t.TransitionUpto
if upto == r.states[2*t.Source] {
// Transition isn't initialized yet (this is the first transition); don't check:
return true
}
nextDest := r.transitions[upto]
nextMin := r.transitions[upto+1]
nextMax := r.transitions[upto+2]
if nextMin > t.Min {
return true
} else if nextMin < t.Min {
return false
}
// Min is equal, now test max:
if nextMax > t.Max {
return true
} else if nextMax < t.Max {
return false
}
// Max is also equal, now test dest:
if nextDest > t.Dest {
return true
} else if nextDest < t.Dest {
return false
}
// We should never see fully equal transitions here:
return false
}
// Fill the provided Transition with the index'th transition leaving the specified state.
func (r *Automaton) getTransition(state, index int, t *Transition) {
i := r.states[2*state] + 3*index
t.Source = state
t.Dest = r.transitions[i]
i++
t.Min = r.transitions[i]
i++
t.Max = r.transitions[i]
i++
}
// Returns sorted array of all interval start points.
func (r *Automaton) GetStartPoints() []int {
pointset := make(map[int]struct{})
pointset[0] = struct{}{}
for s := 0; s < r.nextState; s += 2 {
trans := r.states[s]
limit := trans + 3*r.states[s+1]
//System.out.println(" state=" + (s/2) + " trans=" + trans + " limit=" + limit);
for trans < limit {
min := r.transitions[trans+1]
max := r.transitions[trans+2]
//System.out.println(" min=" + min);
pointset[min] = struct{}{}
if max < 0x10FFFF {
pointset[max+1] = struct{}{}
}
trans += 3
}
}
points := make([]int, 0, len(pointset))
for k, _ := range pointset {
points = append(points, k)
}
sort.Ints(points)
return points
}
// Step Performs lookup in transitions, assuming determinism.
// Params: state – starting state
//
// label – codepoint to look up
//
// Returns: destination state, -1 if no matching outgoing transition
func (r *Automaton) Step(state, label int) int {
return r.next(state, 0, label, nil)
}
// Next
// Looks for the next transition that matches the provided label, assuming determinism.
// This method is similar to step(int, int) but is used more efficiently when iterating over multiple
// transitions from the same source state. It keeps the latest reached transition index in
// transition.transitionUpto so the next call to this method can continue from there instead of restarting
// from the first transition.
//
// transition: The transition to start the lookup from (inclusive, using its Transition.source
// and Transition.transitionUpto). It is updated with the matched transition; or with
// Transition.dest = -1 if no match.
//
// label: The codepoint to look up.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) Next(transition *Transition, label int) int {
return r.next(transition.Source, 0, label, transition)
}
// Looks for the next transition that matches the provided label, assuming determinism.
// state: The source state.
// fromTransitionIndex: The transition index to start the lookup from (inclusive); negative interpreted as 0.
// label: The codepoint to look up.
// transition: The output transition to update with the matching transition; or null for no update.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) next(state, fromTransitionIndex, label int, transition *Transition) int {
stateIndex := 2 * state
firstTransitionIndex := r.states[stateIndex]
numTransitions := r.states[stateIndex+1]
// Since transitions are sorted,
// binary search the transition for which label is within [minLabel, maxLabel].
low := max(fromTransitionIndex, 0)
high := numTransitions - 1
for low <= high {
mid := (low + high) >> 1
transitionIndex := firstTransitionIndex + 3*mid
minLabel := r.transitions[transitionIndex+1]
if minLabel > label {
high = mid - 1
} else {
maxLabel := r.transitions[transitionIndex+2]
if maxLabel < label {
low = mid + 1
} else {
destState := r.transitions[transitionIndex]
if transition != nil {
transition.Dest = destState
transition.Min = minLabel
transition.Max = maxLabel
transition.TransitionUpto = mid
}
return destState
}
}
}
destState := -1
if transition != nil {
transition.Dest = destState
transition.TransitionUpto = low
}
return destState
}
var _ sort.Interface = &builderSorter{}
type builderSorter struct {
values []int
size int
}
func (b *builderSorter) Len() int {
return b.size
}
func (b *builderSorter) Less(i, j int) bool {
i *= 4
j *= 4
if b.values[i] < b.values[j] {
return true
} else if b.values[i] > b.values[j] {
return false
}
if b.values[i+1] < b.values[j+1] {
return true
} else if b.values[i+1] > b.values[j+1] {
return false
}
if b.values[i+2] < b.values[j+2] {
return true
} else if b.values[i+2] > b.values[j+2] {
return false
}
if b.values[i+3] < b.values[j+3] {
return true
} else if b.values[i+3] > b.values[j+3] {
return false
}
return false
}
func (b *builderSorter) Swap(i, j int) {
i *= 4
j *= 4
b.values[i], b.values[j] = b.values[j], b.values[i]
b.values[i+1], b.values[j+1] = b.values[j+1], b.values[i+1]
b.values[i+2], b.values[j+2] = b.values[j+2], b.values[i+2]
b.values[i+3], b.values[j+3] = b.values[j+3], b.values[i+3]
}
func (r *Builder) sort(from, to int) {
sort.Sort(&builderSorter{
values: r.transitions,
size: to - from,
})
}
func (r *Builder) IsAccept(state int) bool {
return r.isAccept.Test(uint(state))
}
| {
if r.nextState+2 > len(r.states) {
r.states = util.Grow(r.states, r.nextState+2)
}
} | identifier_body |
automaton.go | package automaton
import (
"fmt"
"github.com/bits-and-blooms/bitset"
"github.com/geange/lucene-go/core/util"
"sort"
)
// Automaton Represents an automaton and all its states and transitions. States are integers and must be
// created using createState. Mark a state as an accept state using setAccept. Add transitions using
// addTransition. Each state must have all of its transitions added at once; if this is too restrictive
// then use Automaton.Builder instead. State 0 is always the initial state. Once a state is finished,
// either because you've starting adding transitions to another state or you call finishState, then that
// states transitions are sorted (first by min, then max, then dest) and reduced (transitions with adjacent
// labels going to the same dest are combined).
type Automaton struct {
// Where we next write to the int[] states; this increments by 2 for each added state because we
// pack a pointer to the transitions array and a count of how many transitions leave the state.
nextState int
// Where we next write to in int[] transitions; this increments by 3 for each added transition because
// we pack min, max, dest in sequence.
nextTransition int
// Current state we are adding transitions to; the caller must add all transitions for this state
// before moving onto another state.
curState int
// Index in the transitions array, where this states leaving transitions are stored, or -1
// if this state has not added any transitions yet, followed by number of transitions.
states []int
isAccept *bitset.BitSet
// Holds toState, min, max for each transition.
transitions []int
// True if no state has two transitions leaving with the same label.
deterministic bool
}
// NewAutomaton returns an empty automaton with a small default capacity
// (room for 2 states and 2 transitions before growing).
func NewAutomaton() *Automaton {
	return NewAutomatonV1(2, 2)
}

// NewAutomatonV1 returns an empty automaton pre-sized for numStates states
// (2 packed ints per state: transition offset and count) and numTransitions
// transitions (3 packed ints per transition: dest, min, max). curState
// starts at -1, meaning no state currently has transitions being added, and
// the automaton is considered deterministic until proven otherwise.
func NewAutomatonV1(numStates, numTransitions int) *Automaton {
	return &Automaton{
		curState:      -1,
		deterministic: true,
		states:        make([]int, numStates*2),
		isAccept:      bitset.New(uint(numStates)),
		transitions:   make([]int, numTransitions*3),
	}
}
// CreateState Create a new state. States are numbered sequentially from 0;
// the new state's transition offset is initialized to -1 (no transitions
// added yet).
func (r *Automaton) CreateState() int {
	r.growStates()
	state := r.nextState / 2
	r.states[r.nextState] = -1
	r.nextState += 2
	return state
}

// SetAccept Set or clear this state as an accept state.
func (r *Automaton) SetAccept(state int, accept bool) {
	r.isAccept.SetTo(uint(state), accept)
}
// getSortedTransitions materializes every transition of every state as a
// Transition value. This is object-heavy; iterating state by state with
// InitTransition/GetNextTransition is cheaper.
func (r *Automaton) getSortedTransitions() [][]Transition {
	all := make([][]Transition, r.GetNumStates())
	for s := range all {
		perState := make([]Transition, r.GetNumTransitionsWithState(s))
		for t := range perState {
			r.getTransition(s, t, &perState[t])
		}
		all[s] = perState
	}
	return all
}
// Returns accept states. If the bit is set then that state is an accept state.
// The returned BitSet is the automaton's internal set, not a copy.
func (r *Automaton) getAcceptStates() *bitset.BitSet {
	return r.isAccept
}

// IsAccept Returns true if this state is an accept state.
func (r *Automaton) IsAccept(state int) bool {
	return r.isAccept.Test(uint(state))
}

// AddTransitionLabel Add a new transition with min = max = label, i.e. a
// transition that matches exactly one code point.
func (r *Automaton) AddTransitionLabel(source, dest, label int) error {
	return r.AddTransition(source, dest, label, label)
}
// AddTransition Add a new transition with the specified source, dest, min, max.
func (r *Automaton) | (source, dest, min, max int) error {
//bounds := r.nextState / 2
r.growTransitions()
if r.curState != source {
if r.curState != -1 {
r.finishCurrentState()
}
// Move to next source:
r.curState = source
if r.states[2*r.curState] != -1 {
return fmt.Errorf("from state (%d) already had transitions added", source)
}
r.states[2*r.curState] = r.nextTransition
}
r.transitions[r.nextTransition] = dest
r.nextTransition++
r.transitions[r.nextTransition] = min
r.nextTransition++
r.transitions[r.nextTransition] = max
r.nextTransition++
// Increment transition count for this state
r.states[2*r.curState+1]++
return nil
}
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
// If dest is an accept state, source becomes one as well.
func (r *Automaton) AddEpsilon(source, dest int) {
	t := Transition{}
	count := r.InitTransition(dest, &t)
	for i := 0; i < count; i++ {
		r.GetNextTransition(&t)
		// Error deliberately ignored: AddTransition only fails when source
		// already had its transitions finished, which callers must avoid.
		_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
	}
	if r.IsAccept(dest) {
		r.SetAccept(source, true)
	}
}
// Copy Copies over all states/transitions from other. The state numbers of
// the copied states are sequentially assigned (appended) after the states
// already present in the receiver.
func (r *Automaton) Copy(other *Automaton) {
	// Bulk copy and then fixup the state pointers:
	stateOffset := r.GetNumStates()
	r.states = util.Grow(r.states, r.nextState+other.nextState)
	copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
	for i := 0; i < other.nextState; i += 2 {
		if r.states[r.nextState+i] != -1 {
			r.states[r.nextState+i] += r.nextTransition
		}
	}
	r.nextState += other.nextState
	// Mark other's accept states, shifted by stateOffset.
	// BUG FIX: the previous loop declared a shadowed `state` in the
	// if-initializer ("if state, ok := ...NextSet(state); ok"), so the
	// `state++` incremented the shadowed copy and the outer counter never
	// advanced — an infinite loop as soon as other had any accept state.
	otherNumStates := other.GetNumStates()
	otherAcceptStates := other.getAcceptStates()
	for state, ok := otherAcceptStates.NextSet(0); ok && state < uint(otherNumStates); state, ok = otherAcceptStates.NextSet(state + 1) {
		r.SetAccept(stateOffset+int(state), true)
	}
	// Bulk copy and then fixup dest for each transition:
	r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
	copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
	for i := 0; i < other.nextTransition; i += 3 {
		r.transitions[r.nextTransition+i] += stateOffset
	}
	r.nextTransition += other.nextTransition
	if !other.deterministic {
		r.deterministic = false
	}
}
// Freezes the last state, sorting and reducing the transitions.
// Transitions are first sorted by (dest, min, max) so runs with the same
// dest are adjacent, then contiguous/overlapping label ranges to the same
// dest are merged in place, and finally the surviving transitions are
// re-sorted by (min, max, dest). Also clears the deterministic flag when
// two surviving transitions have overlapping label ranges.
func (r *Automaton) finishCurrentState() {
	numTransitions := r.states[2*r.curState+1]
	offset := r.states[2*r.curState]
	// offset counts ints (3 per transition); start is a transition index.
	start := offset / 3
	sort.Sort(&destMinMaxSorter{
		from:      start,
		to:        start + numTransitions,
		Automaton: r,
	})
	// Reduce any "adjacent" transitions:
	upto := 0
	minValue := -1
	maxValue := -1
	dest := -1
	for i := 0; i < numTransitions; i++ {
		tDest := r.transitions[offset+3*i]
		tMin := r.transitions[offset+3*i+1]
		tMax := r.transitions[offset+3*i+2]
		if dest == tDest {
			if tMin <= maxValue+1 {
				// Contiguous or overlapping range to the same dest: extend it.
				if tMax > maxValue {
					maxValue = tMax
				}
			} else {
				// Same dest but a label gap: flush the pending merged range.
				if dest != -1 {
					r.transitions[offset+3*upto] = dest
					r.transitions[offset+3*upto+1] = minValue
					r.transitions[offset+3*upto+2] = maxValue
					upto++
				}
				minValue = tMin
				maxValue = tMax
			}
		} else {
			// New dest: flush the pending merged range for the previous dest.
			if dest != -1 {
				r.transitions[offset+3*upto] = dest
				r.transitions[offset+3*upto+1] = minValue
				r.transitions[offset+3*upto+2] = maxValue
				upto++
			}
			dest = tDest
			minValue = tMin
			maxValue = tMax
		}
	}
	if dest != -1 {
		// Last transition
		r.transitions[offset+3*upto] = dest
		r.transitions[offset+3*upto+1] = minValue
		r.transitions[offset+3*upto+2] = maxValue
		upto++
	}
	// Release the transition slots freed by merging.
	r.nextTransition -= (numTransitions - upto) * 3
	r.states[2*r.curState+1] = upto
	// Sort transitions by minValue/maxValue/dest:
	sort.Sort(&minMaxDestSorter{
		from:      start,
		to:        start + upto,
		Automaton: r,
	})
	if r.deterministic && upto > 1 {
		// Overlapping label ranges mean one label can match two transitions.
		lastMax := r.transitions[offset+2]
		for i := 1; i < upto; i++ {
			minValue = r.transitions[offset+3*i+1]
			if minValue <= lastMax {
				r.deterministic = false
				break
			}
			lastMax = r.transitions[offset+3*i+2]
		}
	}
}
// IsDeterministic Returns true if this automaton is deterministic (for every state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
	return r.deterministic
}

// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to call this method yourself.
func (r *Automaton) finishState() {
	if r.curState != -1 {
		r.finishCurrentState()
		r.curState = -1
	}
}
// GetNumStates How many states this automaton has. Each state occupies two
// packed ints in the states array, hence the division by 2.
func (r *Automaton) GetNumStates() int {
	return r.nextState / 2
}

// GetNumTransitions How many transitions this automaton has. Each transition
// occupies three packed ints in the transitions array, hence the division by 3.
func (r *Automaton) GetNumTransitions() int {
	return r.nextTransition / 3
}

// GetNumTransitionsWithState How many transitions this state has.
// A stored count of -1 is reported as 0.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
	count := r.states[2*state+1]
	if count == -1 {
		return 0
	}
	return count
}
// growStates ensures the states array has room for one more packed state
// (2 ints).
func (r *Automaton) growStates() {
	if r.nextState+2 > len(r.states) {
		r.states = util.Grow(r.states, r.nextState+2)
	}
}

// growTransitions ensures the transitions array has room for one more
// packed transition (3 ints).
func (r *Automaton) growTransitions() {
	if r.nextTransition+3 > len(r.transitions) {
		r.transitions = util.Grow(r.transitions, r.nextTransition+3)
	}
}
// Sorts transitions by dest, ascending, then min label ascending, then max
// label ascending, over the transition range [from, to) of the embedded
// Automaton's packed transitions array (3 ints per transition).
type destMinMaxSorter struct {
	from, to int
	*Automaton
}

func (r *destMinMaxSorter) Len() int {
	return r.to - r.from
}

// Less compares the i'th and j'th transitions of the range being sorted.
// BUG FIX: sort.Sort passes indices relative to the range (0..Len()-1), so
// they must be offset by r.from before indexing; the previous code indexed
// from the start of the array and sorted the wrong transitions whenever
// from > 0.
func (r *destMinMaxSorter) Less(i, j int) bool {
	iStart := 3 * (r.from + i)
	jStart := 3 * (r.from + j)
	// First dest:
	iDest := r.transitions[iStart]
	jDest := r.transitions[jStart]
	if iDest != jDest {
		return iDest < jDest
	}
	// Then min:
	iMin := r.transitions[iStart+1]
	jMin := r.transitions[jStart+1]
	if iMin != jMin {
		return iMin < jMin
	}
	// Then max:
	return r.transitions[iStart+2] < r.transitions[jStart+2]
}

// Swap exchanges the i'th and j'th transitions of the range (indices are
// range-relative, matching Less).
func (r *destMinMaxSorter) Swap(i, j int) {
	iStart, jStart := 3*(r.from+i), 3*(r.from+j)
	r.swapOne(iStart, jStart)
	r.swapOne(iStart+1, jStart+1)
	r.swapOne(iStart+2, jStart+2)
}

// swapOne swaps two ints of the transitions array (absolute indices).
func (r *destMinMaxSorter) swapOne(i, j int) {
	r.transitions[i], r.transitions[j] =
		r.transitions[j], r.transitions[i]
}
// Sorts transitions by min label, ascending, then max label ascending, then
// dest ascending, over the transition range [from, to) of the embedded
// Automaton's packed transitions array (3 ints per transition).
type minMaxDestSorter struct {
	from, to int
	*Automaton
}

func (r *minMaxDestSorter) Len() int {
	return r.to - r.from
}

// Less compares the i'th and j'th transitions of the range being sorted.
// BUG FIX: sort.Sort passes indices relative to the range (0..Len()-1), so
// they must be offset by r.from before indexing; the previous code indexed
// from the start of the array and sorted the wrong transitions whenever
// from > 0.
func (r *minMaxDestSorter) Less(i, j int) bool {
	iStart := 3 * (r.from + i)
	jStart := 3 * (r.from + j)
	// First min:
	iMin := r.transitions[iStart+1]
	jMin := r.transitions[jStart+1]
	if iMin != jMin {
		return iMin < jMin
	}
	// Then max:
	iMax := r.transitions[iStart+2]
	jMax := r.transitions[jStart+2]
	if iMax != jMax {
		return iMax < jMax
	}
	// Then dest:
	return r.transitions[iStart] < r.transitions[jStart]
}

// Swap exchanges the i'th and j'th transitions of the range (indices are
// range-relative, matching Less).
func (r *minMaxDestSorter) Swap(i, j int) {
	iStart, jStart := 3*(r.from+i), 3*(r.from+j)
	r.swapOne(iStart, jStart)
	r.swapOne(iStart+1, jStart+1)
	r.swapOne(iStart+2, jStart+2)
}

// swapOne swaps two ints of the transitions array (absolute indices).
func (r *minMaxDestSorter) swapOne(i, j int) {
	r.transitions[i], r.transitions[j] =
		r.transitions[j], r.transitions[i]
}
// InitTransition Initialize the provided Transition to iterate through all transitions leaving the specified
// state. You must call GetNextTransition to get each transition. Returns the number of transitions leaving
// this state. TransitionUpto is set to the state's first packed transition
// offset in the transitions array.
func (r *Automaton) InitTransition(state int, t *Transition) int {
	t.Source = state
	t.TransitionUpto = r.states[2*state]
	return r.GetNumTransitionsWithState(state)
}
// GetNextTransition Iterate to the next transition after the provided one:
// unpack the 3 ints (dest, min, max) at t.TransitionUpto into t and advance
// the cursor past them.
func (r *Automaton) GetNextTransition(t *Transition) {
	base := t.TransitionUpto
	t.Dest = r.transitions[base]
	t.Min = r.transitions[base+1]
	t.Max = r.transitions[base+2]
	t.TransitionUpto = base + 3
}
// transitionSorted reports whether the transition stored at t.TransitionUpto
// is ordered strictly after t itself, comparing by (min, max, dest).
// Returns true unconditionally when t.TransitionUpto still points at the
// state's first transition (nothing to compare against yet).
func (r *Automaton) transitionSorted(t *Transition) bool {
	upto := t.TransitionUpto
	if upto == r.states[2*t.Source] {
		// Transition isn't initialized yet (this is the first transition); don't check:
		return true
	}
	// Unpack the next transition (dest, min, max).
	nextDest := r.transitions[upto]
	nextMin := r.transitions[upto+1]
	nextMax := r.transitions[upto+2]
	if nextMin > t.Min {
		return true
	} else if nextMin < t.Min {
		return false
	}
	// Min is equal, now test max:
	if nextMax > t.Max {
		return true
	} else if nextMax < t.Max {
		return false
	}
	// Max is also equal, now test dest:
	if nextDest > t.Dest {
		return true
	} else if nextDest < t.Dest {
		return false
	}
	// We should never see fully equal transitions here:
	return false
}
// getTransition fills t with the index'th transition leaving the specified
// state (transitions are packed as 3 ints: dest, min, max).
func (r *Automaton) getTransition(state, index int, t *Transition) {
	base := r.states[2*state] + 3*index
	t.Source = state
	t.Dest = r.transitions[base]
	t.Min = r.transitions[base+1]
	t.Max = r.transitions[base+2]
}
// GetStartPoints returns a sorted slice of all interval start points: 0,
// every transition's min label, and max+1 for every max below the maximum
// code point (0x10FFFF).
func (r *Automaton) GetStartPoints() []int {
	pointset := map[int]struct{}{0: {}}
	for s := 0; s < r.nextState; s += 2 {
		trans := r.states[s]
		limit := trans + 3*r.states[s+1]
		for ; trans < limit; trans += 3 {
			minLabel := r.transitions[trans+1]
			maxLabel := r.transitions[trans+2]
			pointset[minLabel] = struct{}{}
			if maxLabel < 0x10FFFF {
				pointset[maxLabel+1] = struct{}{}
			}
		}
	}
	points := make([]int, 0, len(pointset))
	for p := range pointset {
		points = append(points, p)
	}
	sort.Ints(points)
	return points
}
// Step Performs lookup in transitions, assuming determinism.
// Params: state – starting state
//
// label – codepoint to look up
//
// Returns: destination state, -1 if no matching outgoing transition
func (r *Automaton) Step(state, label int) int {
	return r.next(state, 0, label, nil)
}

// Next
// Looks for the next transition that matches the provided label, assuming determinism.
// This method is similar to step(int, int) but is used more efficiently when iterating over multiple
// transitions from the same source state. It keeps the latest reached transition index in
// transition.transitionUpto so the next call to this method can continue from there instead of restarting
// from the first transition.
//
// transition: The transition to start the lookup from (inclusive, using its Transition.source
// and Transition.transitionUpto). It is updated with the matched transition; or with
// Transition.dest = -1 if no match.
//
// label: The codepoint to look up.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) Next(transition *Transition, label int) int {
	return r.next(transition.Source, 0, label, transition)
}
// Looks for the next transition that matches the provided label, assuming determinism.
// state: The source state.
// fromTransitionIndex: The transition index to start the lookup from (inclusive); negative interpreted as 0.
// label: The codepoint to look up.
// transition: The output transition to update with the matching transition; or nil for no update.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) next(state, fromTransitionIndex, label int, transition *Transition) int {
	stateIndex := 2 * state
	firstTransitionIndex := r.states[stateIndex]
	numTransitions := r.states[stateIndex+1]
	// Since transitions are sorted,
	// binary search the transition for which label is within [minLabel, maxLabel].
	low := max(fromTransitionIndex, 0) // built-in max (Go 1.21+)
	high := numTransitions - 1
	for low <= high {
		mid := (low + high) >> 1
		transitionIndex := firstTransitionIndex + 3*mid
		minLabel := r.transitions[transitionIndex+1]
		if minLabel > label {
			high = mid - 1
		} else {
			maxLabel := r.transitions[transitionIndex+2]
			if maxLabel < label {
				low = mid + 1
			} else {
				// label lies within [minLabel, maxLabel]: match found.
				destState := r.transitions[transitionIndex]
				if transition != nil {
					transition.Dest = destState
					transition.Min = minLabel
					transition.Max = maxLabel
					transition.TransitionUpto = mid
				}
				return destState
			}
		}
	}
	// No transition covers label.
	destState := -1
	if transition != nil {
		transition.Dest = destState
		transition.TransitionUpto = low
	}
	return destState
}
var _ sort.Interface = &builderSorter{}

// builderSorter sorts the first size packed entries of values. Each entry
// is 4 ints, compared lexicographically position by position.
type builderSorter struct {
	values []int
	size   int
}

func (b *builderSorter) Len() int {
	return b.size
}

// Less compares the i'th and j'th 4-int entries lexicographically.
func (b *builderSorter) Less(i, j int) bool {
	i *= 4
	j *= 4
	if b.values[i] < b.values[j] {
		return true
	} else if b.values[i] > b.values[j] {
		return false
	}
	if b.values[i+1] < b.values[j+1] {
		return true
	} else if b.values[i+1] > b.values[j+1] {
		return false
	}
	if b.values[i+2] < b.values[j+2] {
		return true
	} else if b.values[i+2] > b.values[j+2] {
		return false
	}
	if b.values[i+3] < b.values[j+3] {
		return true
	} else if b.values[i+3] > b.values[j+3] {
		return false
	}
	return false
}

// Swap exchanges the i'th and j'th 4-int entries.
func (b *builderSorter) Swap(i, j int) {
	i *= 4
	j *= 4
	b.values[i], b.values[j] = b.values[j], b.values[i]
	b.values[i+1], b.values[j+1] = b.values[j+1], b.values[i+1]
	b.values[i+2], b.values[j+2] = b.values[j+2], b.values[i+2]
	b.values[i+3], b.values[j+3] = b.values[j+3], b.values[i+3]
}
// sort sorts the Builder's packed transitions (4 ints per entry) in the
// entry range [from, to).
// BUG FIX: builderSorter indexes from the start of the slice it is given,
// so it must receive a sub-slice beginning at entry from; the previous code
// handed it the whole slice and therefore sorted the first to-from entries
// instead of the requested range whenever from > 0.
func (r *Builder) sort(from, to int) {
	sort.Sort(&builderSorter{
		values: r.transitions[4*from:],
		size:   to - from,
	})
}
// IsAccept reports whether state is an accept state of the Builder.
func (r *Builder) IsAccept(state int) bool {
	return r.isAccept.Test(uint(state))
}
| AddTransition | identifier_name |
automaton.go | package automaton
import (
"fmt"
"github.com/bits-and-blooms/bitset"
"github.com/geange/lucene-go/core/util"
"sort"
)
// Automaton Represents an automaton and all its states and transitions. States are integers and must be
// created using createState. Mark a state as an accept state using setAccept. Add transitions using
// addTransition. Each state must have all of its transitions added at once; if this is too restrictive
// then use Automaton.Builder instead. State 0 is always the initial state. Once a state is finished,
// either because you've starting adding transitions to another state or you call finishState, then that
// states transitions are sorted (first by min, then max, then dest) and reduced (transitions with adjacent
// labels going to the same dest are combined).
type Automaton struct {
// Where we next write to the int[] states; this increments by 2 for each added state because we
// pack a pointer to the transitions array and a count of how many transitions leave the state.
nextState int
// Where we next write to in int[] transitions; this increments by 3 for each added transition because
// we pack min, max, dest in sequence.
nextTransition int
// Current state we are adding transitions to; the caller must add all transitions for this state
// before moving onto another state.
curState int
// Index in the transitions array, where this states leaving transitions are stored, or -1
// if this state has not added any transitions yet, followed by number of transitions.
states []int
isAccept *bitset.BitSet
// Holds toState, min, max for each transition.
transitions []int
// True if no state has two transitions leaving with the same label.
deterministic bool
}
func NewAutomaton() *Automaton {
return NewAutomatonV1(2, 2)
}
func NewAutomatonV1(numStates, numTransitions int) *Automaton {
return &Automaton{
curState: -1,
deterministic: true,
states: make([]int, numStates*2),
isAccept: bitset.New(uint(numStates)),
transitions: make([]int, numTransitions*3),
}
}
// CreateState Create a new state.
func (r *Automaton) CreateState() int {
r.growStates()
state := r.nextState / 2
r.states[r.nextState] = -1
r.nextState += 2
return state
}
// SetAccept Set or clear this state as an accept state.
func (r *Automaton) SetAccept(state int, accept bool) {
r.isAccept.SetTo(uint(state), accept)
}
// Sugar to get all transitions for all states. This is object-heavy; it's better to iterate state by state instead.
func (r *Automaton) getSortedTransitions() [][]Transition {
numStates := r.GetNumStates()
transitions := make([][]Transition, numStates)
for s := 0; s < numStates; s++ {
numTransitions := r.GetNumTransitionsWithState(s)
transitions[s] = make([]Transition, numTransitions)
for t := 0; t < numTransitions; t++ {
transition := Transition{}
r.getTransition(s, t, &transition)
transitions[s][t] = transition
}
}
return transitions
}
// Returns accept states. If the bit is set then that state is an accept state.
func (r *Automaton) getAcceptStates() *bitset.BitSet {
return r.isAccept
}
// IsAccept Returns true if this state is an accept state.
func (r *Automaton) IsAccept(state int) bool {
return r.isAccept.Test(uint(state))
}
// AddTransitionLabel Add a new transition with min = max = label.
func (r *Automaton) AddTransitionLabel(source, dest, label int) error {
return r.AddTransition(source, dest, label, label)
}
// AddTransition Add a new transition with the specified source, dest, min, max.
// All transitions for a given source must be added consecutively; switching
// to a new source first finishes (sorts and reduces) the previous one.
// Returns an error if source already had its transitions finished earlier.
func (r *Automaton) AddTransition(source, dest, min, max int) error {
	//bounds := r.nextState / 2
	r.growTransitions()
	if r.curState != source {
		if r.curState != -1 {
			r.finishCurrentState()
		}
		// Move to next source:
		r.curState = source
		if r.states[2*r.curState] != -1 {
			return fmt.Errorf("from state (%d) already had transitions added", source)
		}
		// Record where this state's transitions begin.
		r.states[2*r.curState] = r.nextTransition
	}
	// Pack dest, min, max in sequence.
	r.transitions[r.nextTransition] = dest
	r.nextTransition++
	r.transitions[r.nextTransition] = min
	r.nextTransition++
	r.transitions[r.nextTransition] = max
	r.nextTransition++
	// Increment transition count for this state
	r.states[2*r.curState+1]++
	return nil
}
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
func (r *Automaton) AddEpsilon(source, dest int) {
t := Transition{}
count := r.InitTransition(dest, &t)
for i := 0; i < count; i++ {
r.GetNextTransition(&t)
_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
}
if r.IsAccept(dest) {
r.SetAccept(source, true)
}
}
// Copy Copies over all states/transitions from other. The state numbers of
// the copied states are sequentially assigned (appended) after the states
// already present in the receiver.
func (r *Automaton) Copy(other *Automaton) {
	// Bulk copy and then fixup the state pointers:
	stateOffset := r.GetNumStates()
	r.states = util.Grow(r.states, r.nextState+other.nextState)
	copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
	for i := 0; i < other.nextState; i += 2 {
		if r.states[r.nextState+i] != -1 {
			r.states[r.nextState+i] += r.nextTransition
		}
	}
	r.nextState += other.nextState
	// Mark other's accept states, shifted by stateOffset.
	// BUG FIX: the previous loop declared a shadowed `state` in the
	// if-initializer ("if state, ok := ...NextSet(state); ok"), so the
	// `state++` incremented the shadowed copy and the outer counter never
	// advanced — an infinite loop as soon as other had any accept state.
	otherNumStates := other.GetNumStates()
	otherAcceptStates := other.getAcceptStates()
	for state, ok := otherAcceptStates.NextSet(0); ok && state < uint(otherNumStates); state, ok = otherAcceptStates.NextSet(state + 1) {
		r.SetAccept(stateOffset+int(state), true)
	}
	// Bulk copy and then fixup dest for each transition:
	r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
	copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
	for i := 0; i < other.nextTransition; i += 3 {
		r.transitions[r.nextTransition+i] += stateOffset
	}
	r.nextTransition += other.nextTransition
	if !other.deterministic {
		r.deterministic = false
	}
}
// Freezes the last state, sorting and reducing the transitions.
func (r *Automaton) finishCurrentState() {
numTransitions := r.states[2*r.curState+1]
offset := r.states[2*r.curState]
start := offset / 3
sort.Sort(&destMinMaxSorter{
from: start,
to: start + numTransitions,
Automaton: r,
})
// Reduce any "adjacent" transitions:
upto := 0
minValue := -1
maxValue := -1
dest := -1
for i := 0; i < numTransitions; i++ {
tDest := r.transitions[offset+3*i]
tMin := r.transitions[offset+3*i+1]
tMax := r.transitions[offset+3*i+2]
if dest == tDest {
if tMin <= maxValue+1 {
if tMax > maxValue {
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
minValue = tMin
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
dest = tDest
minValue = tMin
maxValue = tMax
}
}
if dest != -1 {
// Last transition
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
r.nextTransition -= (numTransitions - upto) * 3
r.states[2*r.curState+1] = upto
// Sort transitions by minValue/maxValue/dest:
sort.Sort(&minMaxDestSorter{
from: start,
to: start + upto,
Automaton: r,
})
if r.deterministic && upto > 1 {
lastMax := r.transitions[offset+2]
for i := 1; i < upto; i++ {
minValue = r.transitions[offset+3*i+1]
if minValue <= lastMax {
r.deterministic = false
break
}
lastMax = r.transitions[offset+3*i+2]
}
}
}
// IsDeterministic Returns true if this automaton is deterministic (for ever state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
return r.deterministic
}
// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to this method yourself.
func (r *Automaton) finishState() {
if r.curState != -1 {
r.finishCurrentState()
r.curState = -1
}
}
// GetNumStates How many states this automaton has.
func (r *Automaton) GetNumStates() int {
return r.nextState / 2
}
// GetNumTransitions How many transitions this automaton has.
func (r *Automaton) GetNumTransitions() int {
return r.nextTransition / 3
}
// GetNumTransitionsWithState How many transitions this state has.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
count := r.states[2*state+1]
if count == -1 {
return 0
}
return count
}
func (r *Automaton) growStates() {
if r.nextState+2 > len(r.states) {
r.states = util.Grow(r.states, r.nextState+2)
}
}
func (r *Automaton) growTransitions() {
if r.nextTransition+3 > len(r.transitions) {
r.transitions = util.Grow(r.transitions, r.nextTransition+3)
}
}
// Sorts transitions by dest, ascending, then min label ascending, then max label ascending
type destMinMaxSorter struct {
from, to int
*Automaton
}
func (r *destMinMaxSorter) Len() int {
return r.to - r.from
}
func (r *destMinMaxSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
// First dest:
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
// Then min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax |
return false
}
func (r *destMinMaxSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *destMinMaxSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// Sorts transitions by min label, ascending, then max label ascending, then dest ascending
type minMaxDestSorter struct {
from, to int
*Automaton
}
func (r *minMaxDestSorter) Len() int {
return r.to - r.from
}
func (r *minMaxDestSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
// First min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax {
return false
}
// Then dest:
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
return false
}
func (r *minMaxDestSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *minMaxDestSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// InitTransition Initialize the provided Transition to iterate through all transitions leaving the specified
// state. You must call GetNextTransition to get each transition. Returns the number of transitions leaving
// this state.
func (r *Automaton) InitTransition(state int, t *Transition) int {
t.Source = state
t.TransitionUpto = r.states[2*state]
return r.GetNumTransitionsWithState(state)
}
// GetNextTransition Iterate to the next transition after the provided one
func (r *Automaton) GetNextTransition(t *Transition) {
t.Dest = r.transitions[t.TransitionUpto]
t.TransitionUpto++
t.Min = r.transitions[t.TransitionUpto]
t.TransitionUpto++
t.Max = r.transitions[t.TransitionUpto]
t.TransitionUpto++
}
func (r *Automaton) transitionSorted(t *Transition) bool {
upto := t.TransitionUpto
if upto == r.states[2*t.Source] {
// Transition isn't initialized yet (this is the first transition); don't check:
return true
}
nextDest := r.transitions[upto]
nextMin := r.transitions[upto+1]
nextMax := r.transitions[upto+2]
if nextMin > t.Min {
return true
} else if nextMin < t.Min {
return false
}
// Min is equal, now test max:
if nextMax > t.Max {
return true
} else if nextMax < t.Max {
return false
}
// Max is also equal, now test dest:
if nextDest > t.Dest {
return true
} else if nextDest < t.Dest {
return false
}
// We should never see fully equal transitions here:
return false
}
// Fill the provided Transition with the index'th transition leaving the specified state.
func (r *Automaton) getTransition(state, index int, t *Transition) {
i := r.states[2*state] + 3*index
t.Source = state
t.Dest = r.transitions[i]
i++
t.Min = r.transitions[i]
i++
t.Max = r.transitions[i]
i++
}
// Returns sorted array of all interval start points.
func (r *Automaton) GetStartPoints() []int {
pointset := make(map[int]struct{})
pointset[0] = struct{}{}
for s := 0; s < r.nextState; s += 2 {
trans := r.states[s]
limit := trans + 3*r.states[s+1]
//System.out.println(" state=" + (s/2) + " trans=" + trans + " limit=" + limit);
for trans < limit {
min := r.transitions[trans+1]
max := r.transitions[trans+2]
//System.out.println(" min=" + min);
pointset[min] = struct{}{}
if max < 0x10FFFF {
pointset[max+1] = struct{}{}
}
trans += 3
}
}
points := make([]int, 0, len(pointset))
for k, _ := range pointset {
points = append(points, k)
}
sort.Ints(points)
return points
}
// Step Performs lookup in transitions, assuming determinism.
// Params: state – starting state
//
// label – codepoint to look up
//
// Returns: destination state, -1 if no matching outgoing transition
func (r *Automaton) Step(state, label int) int {
return r.next(state, 0, label, nil)
}
// Next
// Looks for the next transition that matches the provided label, assuming determinism.
// This method is similar to step(int, int) but is used more efficiently when iterating over multiple
// transitions from the same source state. It keeps the latest reached transition index in
// transition.transitionUpto so the next call to this method can continue from there instead of restarting
// from the first transition.
//
// transition: The transition to start the lookup from (inclusive, using its Transition.source
// and Transition.transitionUpto). It is updated with the matched transition; or with
// Transition.dest = -1 if no match.
//
// label: The codepoint to look up.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) Next(transition *Transition, label int) int {
return r.next(transition.Source, 0, label, transition)
}
// Looks for the next transition that matches the provided label, assuming determinism.
// state: The source state.
// fromTransitionIndex: The transition index to start the lookup from (inclusive); negative interpreted as 0.
// label: The codepoint to look up.
// transition: The output transition to update with the matching transition; or null for no update.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) next(state, fromTransitionIndex, label int, transition *Transition) int {
stateIndex := 2 * state
firstTransitionIndex := r.states[stateIndex]
numTransitions := r.states[stateIndex+1]
// Since transitions are sorted,
// binary search the transition for which label is within [minLabel, maxLabel].
low := max(fromTransitionIndex, 0)
high := numTransitions - 1
for low <= high {
mid := (low + high) >> 1
transitionIndex := firstTransitionIndex + 3*mid
minLabel := r.transitions[transitionIndex+1]
if minLabel > label {
high = mid - 1
} else {
maxLabel := r.transitions[transitionIndex+2]
if maxLabel < label {
low = mid + 1
} else {
destState := r.transitions[transitionIndex]
if transition != nil {
transition.Dest = destState
transition.Min = minLabel
transition.Max = maxLabel
transition.TransitionUpto = mid
}
return destState
}
}
}
destState := -1
if transition != nil {
transition.Dest = destState
transition.TransitionUpto = low
}
return destState
}
var _ sort.Interface = &builderSorter{}
type builderSorter struct {
values []int
size int
}
func (b *builderSorter) Len() int {
return b.size
}
func (b *builderSorter) Less(i, j int) bool {
i *= 4
j *= 4
if b.values[i] < b.values[j] {
return true
} else if b.values[i] > b.values[j] {
return false
}
if b.values[i+1] < b.values[j+1] {
return true
} else if b.values[i+1] > b.values[j+1] {
return false
}
if b.values[i+2] < b.values[j+2] {
return true
} else if b.values[i+2] > b.values[j+2] {
return false
}
if b.values[i+3] < b.values[j+3] {
return true
} else if b.values[i+3] > b.values[j+3] {
return false
}
return false
}
func (b *builderSorter) Swap(i, j int) {
i *= 4
j *= 4
b.values[i], b.values[j] = b.values[j], b.values[i]
b.values[i+1], b.values[j+1] = b.values[j+1], b.values[i+1]
b.values[i+2], b.values[j+2] = b.values[j+2], b.values[i+2]
b.values[i+3], b.values[j+3] = b.values[j+3], b.values[i+3]
}
func (r *Builder) sort(from, to int) {
sort.Sort(&builderSorter{
values: r.transitions,
size: to - from,
})
}
func (r *Builder) IsAccept(state int) bool {
return r.isAccept.Test(uint(state))
}
| {
return false
} | conditional_block |
automaton.go | package automaton
import (
"fmt"
"github.com/bits-and-blooms/bitset"
"github.com/geange/lucene-go/core/util"
"sort"
)
// Automaton Represents an automaton and all its states and transitions. States are integers and must be
// created using createState. Mark a state as an accept state using setAccept. Add transitions using
// addTransition. Each state must have all of its transitions added at once; if this is too restrictive
// then use Automaton.Builder instead. State 0 is always the initial state. Once a state is finished,
// either because you've starting adding transitions to another state or you call finishState, then that
// states transitions are sorted (first by min, then max, then dest) and reduced (transitions with adjacent
// labels going to the same dest are combined).
type Automaton struct {
// Where we next write to the int[] states; this increments by 2 for each added state because we
// pack a pointer to the transitions array and a count of how many transitions leave the state.
nextState int
// Where we next write to in int[] transitions; this increments by 3 for each added transition because
// we pack min, max, dest in sequence.
nextTransition int
// Current state we are adding transitions to; the caller must add all transitions for this state
// before moving onto another state.
curState int
// Index in the transitions array, where this states leaving transitions are stored, or -1
// if this state has not added any transitions yet, followed by number of transitions.
states []int
isAccept *bitset.BitSet
// Holds toState, min, max for each transition.
transitions []int
// True if no state has two transitions leaving with the same label.
deterministic bool
}
func NewAutomaton() *Automaton {
return NewAutomatonV1(2, 2)
} | func NewAutomatonV1(numStates, numTransitions int) *Automaton {
return &Automaton{
curState: -1,
deterministic: true,
states: make([]int, numStates*2),
isAccept: bitset.New(uint(numStates)),
transitions: make([]int, numTransitions*3),
}
}
// CreateState Create a new state.
func (r *Automaton) CreateState() int {
r.growStates()
state := r.nextState / 2
r.states[r.nextState] = -1
r.nextState += 2
return state
}
// SetAccept Set or clear this state as an accept state.
func (r *Automaton) SetAccept(state int, accept bool) {
r.isAccept.SetTo(uint(state), accept)
}
// Sugar to get all transitions for all states. This is object-heavy; it's better to iterate state by state instead.
func (r *Automaton) getSortedTransitions() [][]Transition {
numStates := r.GetNumStates()
transitions := make([][]Transition, numStates)
for s := 0; s < numStates; s++ {
numTransitions := r.GetNumTransitionsWithState(s)
transitions[s] = make([]Transition, numTransitions)
for t := 0; t < numTransitions; t++ {
transition := Transition{}
r.getTransition(s, t, &transition)
transitions[s][t] = transition
}
}
return transitions
}
// Returns accept states. If the bit is set then that state is an accept state.
func (r *Automaton) getAcceptStates() *bitset.BitSet {
return r.isAccept
}
// IsAccept Returns true if this state is an accept state.
func (r *Automaton) IsAccept(state int) bool {
return r.isAccept.Test(uint(state))
}
// AddTransitionLabel Add a new transition with min = max = label.
func (r *Automaton) AddTransitionLabel(source, dest, label int) error {
return r.AddTransition(source, dest, label, label)
}
// AddTransition Add a new transition with the specified source, dest, min, max.
func (r *Automaton) AddTransition(source, dest, min, max int) error {
//bounds := r.nextState / 2
r.growTransitions()
if r.curState != source {
if r.curState != -1 {
r.finishCurrentState()
}
// Move to next source:
r.curState = source
if r.states[2*r.curState] != -1 {
return fmt.Errorf("from state (%d) already had transitions added", source)
}
r.states[2*r.curState] = r.nextTransition
}
r.transitions[r.nextTransition] = dest
r.nextTransition++
r.transitions[r.nextTransition] = min
r.nextTransition++
r.transitions[r.nextTransition] = max
r.nextTransition++
// Increment transition count for this state
r.states[2*r.curState+1]++
return nil
}
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
func (r *Automaton) AddEpsilon(source, dest int) {
t := Transition{}
count := r.InitTransition(dest, &t)
for i := 0; i < count; i++ {
r.GetNextTransition(&t)
_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
}
if r.IsAccept(dest) {
r.SetAccept(source, true)
}
}
// Copy Copies over all states/transitions from other. The states numbers are sequentially assigned (appended).
func (r *Automaton) Copy(other *Automaton) {
// Bulk copy and then fixup the state pointers:
stateOffset := r.GetNumStates()
r.states = util.Grow(r.states, r.nextState+other.nextState)
copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
for i := 0; i < other.nextState; i += 2 {
if r.states[r.nextState+i] != -1 {
r.states[r.nextState+i] += r.nextTransition
}
}
r.nextState += other.nextState
otherNumStates := other.GetNumStates()
otherAcceptStates := other.getAcceptStates()
state := uint(0)
for {
if state < uint(otherNumStates) {
if state, ok := otherAcceptStates.NextSet(state); ok {
r.SetAccept(stateOffset+int(state), true)
state++
continue
}
}
break
}
// Bulk copy and then fixup dest for each transition:
r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
for i := 0; i < other.nextTransition; i += 3 {
r.transitions[r.nextTransition+i] += stateOffset
}
r.nextTransition += other.nextTransition
if other.deterministic == false {
r.deterministic = false
}
}
// Freezes the last state, sorting and reducing the transitions.
func (r *Automaton) finishCurrentState() {
numTransitions := r.states[2*r.curState+1]
offset := r.states[2*r.curState]
start := offset / 3
sort.Sort(&destMinMaxSorter{
from: start,
to: start + numTransitions,
Automaton: r,
})
// Reduce any "adjacent" transitions:
upto := 0
minValue := -1
maxValue := -1
dest := -1
for i := 0; i < numTransitions; i++ {
tDest := r.transitions[offset+3*i]
tMin := r.transitions[offset+3*i+1]
tMax := r.transitions[offset+3*i+2]
if dest == tDest {
if tMin <= maxValue+1 {
if tMax > maxValue {
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
minValue = tMin
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
dest = tDest
minValue = tMin
maxValue = tMax
}
}
if dest != -1 {
// Last transition
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
r.nextTransition -= (numTransitions - upto) * 3
r.states[2*r.curState+1] = upto
// Sort transitions by minValue/maxValue/dest:
sort.Sort(&minMaxDestSorter{
from: start,
to: start + upto,
Automaton: r,
})
if r.deterministic && upto > 1 {
lastMax := r.transitions[offset+2]
for i := 1; i < upto; i++ {
minValue = r.transitions[offset+3*i+1]
if minValue <= lastMax {
r.deterministic = false
break
}
lastMax = r.transitions[offset+3*i+2]
}
}
}
// IsDeterministic Returns true if this automaton is deterministic (for ever state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
return r.deterministic
}
// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to this method yourself.
func (r *Automaton) finishState() {
if r.curState != -1 {
r.finishCurrentState()
r.curState = -1
}
}
// GetNumStates How many states this automaton has.
func (r *Automaton) GetNumStates() int {
return r.nextState / 2
}
// GetNumTransitions How many transitions this automaton has.
func (r *Automaton) GetNumTransitions() int {
return r.nextTransition / 3
}
// GetNumTransitionsWithState How many transitions this state has.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
count := r.states[2*state+1]
if count == -1 {
return 0
}
return count
}
func (r *Automaton) growStates() {
if r.nextState+2 > len(r.states) {
r.states = util.Grow(r.states, r.nextState+2)
}
}
func (r *Automaton) growTransitions() {
if r.nextTransition+3 > len(r.transitions) {
r.transitions = util.Grow(r.transitions, r.nextTransition+3)
}
}
// Sorts transitions by dest, ascending, then min label ascending, then max label ascending
type destMinMaxSorter struct {
from, to int
*Automaton
}
func (r *destMinMaxSorter) Len() int {
return r.to - r.from
}
func (r *destMinMaxSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
// First dest:
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
// Then min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax {
return false
}
return false
}
func (r *destMinMaxSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *destMinMaxSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// Sorts transitions by min label, ascending, then max label ascending, then dest ascending
type minMaxDestSorter struct {
from, to int
*Automaton
}
func (r *minMaxDestSorter) Len() int {
return r.to - r.from
}
func (r *minMaxDestSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
// First min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax {
return false
}
// Then dest:
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
return false
}
func (r *minMaxDestSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *minMaxDestSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// InitTransition Initialize the provided Transition to iterate through all transitions leaving the specified
// state. You must call GetNextTransition to get each transition. Returns the number of transitions leaving
// this state.
func (r *Automaton) InitTransition(state int, t *Transition) int {
t.Source = state
t.TransitionUpto = r.states[2*state]
return r.GetNumTransitionsWithState(state)
}
// GetNextTransition Iterate to the next transition after the provided one
func (r *Automaton) GetNextTransition(t *Transition) {
t.Dest = r.transitions[t.TransitionUpto]
t.TransitionUpto++
t.Min = r.transitions[t.TransitionUpto]
t.TransitionUpto++
t.Max = r.transitions[t.TransitionUpto]
t.TransitionUpto++
}
func (r *Automaton) transitionSorted(t *Transition) bool {
upto := t.TransitionUpto
if upto == r.states[2*t.Source] {
// Transition isn't initialized yet (this is the first transition); don't check:
return true
}
nextDest := r.transitions[upto]
nextMin := r.transitions[upto+1]
nextMax := r.transitions[upto+2]
if nextMin > t.Min {
return true
} else if nextMin < t.Min {
return false
}
// Min is equal, now test max:
if nextMax > t.Max {
return true
} else if nextMax < t.Max {
return false
}
// Max is also equal, now test dest:
if nextDest > t.Dest {
return true
} else if nextDest < t.Dest {
return false
}
// We should never see fully equal transitions here:
return false
}
// Fill the provided Transition with the index'th transition leaving the specified state.
func (r *Automaton) getTransition(state, index int, t *Transition) {
i := r.states[2*state] + 3*index
t.Source = state
t.Dest = r.transitions[i]
i++
t.Min = r.transitions[i]
i++
t.Max = r.transitions[i]
i++
}
// Returns sorted array of all interval start points.
func (r *Automaton) GetStartPoints() []int {
pointset := make(map[int]struct{})
pointset[0] = struct{}{}
for s := 0; s < r.nextState; s += 2 {
trans := r.states[s]
limit := trans + 3*r.states[s+1]
//System.out.println(" state=" + (s/2) + " trans=" + trans + " limit=" + limit);
for trans < limit {
min := r.transitions[trans+1]
max := r.transitions[trans+2]
//System.out.println(" min=" + min);
pointset[min] = struct{}{}
if max < 0x10FFFF {
pointset[max+1] = struct{}{}
}
trans += 3
}
}
points := make([]int, 0, len(pointset))
for k, _ := range pointset {
points = append(points, k)
}
sort.Ints(points)
return points
}
// Step Performs lookup in transitions, assuming determinism.
// Params: state – starting state
//
// label – codepoint to look up
//
// Returns: destination state, -1 if no matching outgoing transition
func (r *Automaton) Step(state, label int) int {
return r.next(state, 0, label, nil)
}
// Next
// Looks for the next transition that matches the provided label, assuming determinism.
// This method is similar to step(int, int) but is used more efficiently when iterating over multiple
// transitions from the same source state. It keeps the latest reached transition index in
// transition.transitionUpto so the next call to this method can continue from there instead of restarting
// from the first transition.
//
// transition: The transition to start the lookup from (inclusive, using its Transition.source
// and Transition.transitionUpto). It is updated with the matched transition; or with
// Transition.dest = -1 if no match.
//
// label: The codepoint to look up.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) Next(transition *Transition, label int) int {
return r.next(transition.Source, 0, label, transition)
}
// Looks for the next transition that matches the provided label, assuming determinism.
// state: The source state.
// fromTransitionIndex: The transition index to start the lookup from (inclusive); negative interpreted as 0.
// label: The codepoint to look up.
// transition: The output transition to update with the matching transition; or null for no update.
//
// Returns: The destination state; or -1 if no matching outgoing transition.
func (r *Automaton) next(state, fromTransitionIndex, label int, transition *Transition) int {
stateIndex := 2 * state
firstTransitionIndex := r.states[stateIndex]
numTransitions := r.states[stateIndex+1]
// Since transitions are sorted,
// binary search the transition for which label is within [minLabel, maxLabel].
low := max(fromTransitionIndex, 0)
high := numTransitions - 1
for low <= high {
mid := (low + high) >> 1
transitionIndex := firstTransitionIndex + 3*mid
minLabel := r.transitions[transitionIndex+1]
if minLabel > label {
high = mid - 1
} else {
maxLabel := r.transitions[transitionIndex+2]
if maxLabel < label {
low = mid + 1
} else {
destState := r.transitions[transitionIndex]
if transition != nil {
transition.Dest = destState
transition.Min = minLabel
transition.Max = maxLabel
transition.TransitionUpto = mid
}
return destState
}
}
}
destState := -1
if transition != nil {
transition.Dest = destState
transition.TransitionUpto = low
}
return destState
}
var _ sort.Interface = &builderSorter{}
type builderSorter struct {
values []int
size int
}
func (b *builderSorter) Len() int {
return b.size
}
func (b *builderSorter) Less(i, j int) bool {
i *= 4
j *= 4
if b.values[i] < b.values[j] {
return true
} else if b.values[i] > b.values[j] {
return false
}
if b.values[i+1] < b.values[j+1] {
return true
} else if b.values[i+1] > b.values[j+1] {
return false
}
if b.values[i+2] < b.values[j+2] {
return true
} else if b.values[i+2] > b.values[j+2] {
return false
}
if b.values[i+3] < b.values[j+3] {
return true
} else if b.values[i+3] > b.values[j+3] {
return false
}
return false
}
func (b *builderSorter) Swap(i, j int) {
i *= 4
j *= 4
b.values[i], b.values[j] = b.values[j], b.values[i]
b.values[i+1], b.values[j+1] = b.values[j+1], b.values[i+1]
b.values[i+2], b.values[j+2] = b.values[j+2], b.values[i+2]
b.values[i+3], b.values[j+3] = b.values[j+3], b.values[i+3]
}
func (r *Builder) sort(from, to int) {
sort.Sort(&builderSorter{
values: r.transitions,
size: to - from,
})
}
func (r *Builder) IsAccept(state int) bool {
return r.isAccept.Test(uint(state))
} | random_line_split | |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
}
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..i32::try_from(liblinear_model.num_features()).unwrap() + 1 {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?; | let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if !entry.file_type()?.is_dir() && entry.file_name() != ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(&sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
} | let mut model: Model = serde_json::from_str(&model_str)?; | random_line_split |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> |
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..i32::try_from(liblinear_model.num_features()).unwrap() + 1 {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?;
let mut model: Model = serde_json::from_str(&model_str)?;
let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if !entry.file_type()?.is_dir() && entry.file_name() != ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(&sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
}
| {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
} | identifier_body |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
}
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
| lse if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..i32::try_from(liblinear_model.num_features()).unwrap() + 1 {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?;
let mut model: Model = serde_json::from_str(&model_str)?;
let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if !entry.file_type()?.is_dir() && entry.file_name() != ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(&sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
}
| prefix
} e | conditional_block |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
}
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn | (s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..i32::try_from(liblinear_model.num_features()).unwrap() + 1 {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?;
let mut model: Model = serde_json::from_str(&model_str)?;
let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if !entry.file_type()?.is_dir() && entry.file_name() != ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(&sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
}
| generalize | identifier_name |
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
fn | () -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return tuple of major, minor and patch of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shutdown the ØMQ context context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Scoket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capcity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to xxxxx with an argument of ZMQ_MORE.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used this while
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
None
} else {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!(e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
///
/// Bindng of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library
pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
}
| errno | identifier_name |
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
fn errno() -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return tuple of major, minor and patch of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shutdown the ØMQ context context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Scoket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capcity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to xxxxx with an argument of ZMQ_MORE.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used this while
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
None
} else {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!(e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
/// | pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
} | /// Bindng of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library | random_line_split |
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
fn errno() -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return tuple of major, minor and patch of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shutdown the ØMQ context context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Scoket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
| /// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to xxxxx with an argument of ZMQ_MORE.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used this while
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
None
} else {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!(e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
///
/// Bindng of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library
pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
}
| unsafe {
let mut msg = try!(Message::with_capcity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
| identifier_body |
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
fn errno() -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return tuple of major, minor and patch of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shutdown the ØMQ context context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Scoket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capcity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to xxxxx with an argument of ZMQ_MORE.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used this while
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
| e {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!(e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
///
/// Bindng of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library
pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
}
| None
} els | conditional_block |
ui.js | //-------------------------------------------------- //
/*
UI.js -
Handles the drawing of the main UI
*/
//---------------------------------------------------//
(function ($){
var Interface = function(getLayerContext, state){
//some constants
this.LAYER_ID = game.UI;
this.LEFT_CLICK = 1;
this.layercontext = null;
this.state = null; //game state
this.playername = null; //the player's name
this.score = null; //displays score during play time and high score otherwise
this.multiplier = null; //number of planes
this.difficulty = null; //determines amount of missiles at one point in time
this.play_button = null; //starts game when clicked
this.instructions = null; //displays instructions
this.score_menu = null; //displays all-time high scores when clicked
this.displaying_score = false;
this.minimize_score_button = null;
this.img_src = ["media/plane1_top.png", "media/plane2_top.png", "media/plane3_top.png"];
this.images = [];
this.image_divs = [];
this.plane_choices = [];
this.difficulty_divs = [];
this.difficulties = ["Easy", "Medium", "Hard", "Insane"];
this.selected_difficulty = null;
this.loading_img = null;
this.currentframe = 0;
this.frame_x = [];
this.anim_speed = 2;
this.init = function(getLayerContext, state){
this.layercontext = getLayerContext(this.LAYER_ID);
this.state = state;
$("#name").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
$("#name>form").on({
submit: this.enter_name.bind(this)
});
$("#name>form>input").mouseup(function(event){
$(this).focus();
});
this.score = $("#score").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.multiplier = $("#planes").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<3; i++){
this.images[i] = new Image();
this.images[i].src = this.img_src[i];
this.image_divs[i] = $("<div></div>",{
"id": "plane"+i,
"class": "ui"
}).css({
"opacity": 0,
"padding": "0px 0px 0px 0px",
"display": "inline",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.select_plane.bind(this)
}).appendTo(this.multiplier);
this.plane_choices[i] = false;
}
this.difficulty = $("#difficulty").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<4; i++){
this.difficulty_divs[i] = $("<div>"+this.difficulties[i]+"</div>")
.css({
"display": "inline",
"z-index": this.LAYER_ID+1,
"padding": "10px 10px 10px 10px"
}).on({
mouseup: this.select_difficulty.bind(this)
}).appendTo(this.difficulty);
}
this.play_button = $("#play").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.play_clicked.bind(this)
});
this.instructions = $("#instructions").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.score_menu = $("#all_scores").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.show_scores.bind(this)
});
this.minimize_score_button = $("#minimize").on({
mouseup: this.hide_scores.bind(this)
});
this.loading_img = new Image();
this.loading_img.src = "media/loading.png";
this.loading_img.width = this.loading_img.height = 128;
for(var i=0; i<19; i++){
this.frame_x[i] = i * this.loading_img.width; | switch(this.state){
case "starting":
$(".ui").css({
"z-index": this.LAYER_ID+1
});
break;
case "running":
break;
case "loading":
//check if images finish loading before starting game
var allLoaded = true;
for(var i=0; i<game.system.entities.length; i++){
allLoaded = allLoaded && game.system.entities[i].isLoaded;
}
if(!allLoaded) {
break;
}
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
game.system.state = this.state = "running";
break;
default:
break;
}
};
this.draw = function(){
switch(this.state){
case "starting":
this.score.html("High Score: "+game.system.highscore)
.css({
"background-color": "",
"top": "",
"left": ""
});
this.layercontext.fillRect(0,0,game.CANVAS_W,game.CANVAS_H);
var x = this.multiplier.position().left + this.multiplier.outerWidth();
y = this.multiplier.position().top;
var img_width, img_x;
for(var i=0; i<this.images.length; i++){
img_width = this.multiplier.outerHeight()*this.images[i].width/this.images[i].height;
img_x = x + 30+ i*(img_width+20);
this.layercontext.drawImage(this.images[i],
img_x, y,
img_width, this.multiplier.outerHeight()
);
if(this.plane_choices[i]){
this.image_divs[i].css({
"opacity": 1,
"background-color": "rgba(222,135,229,0.5)",
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}else{
this.image_divs[i].css({
"opacity": 0,
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}
}
break;
case "running":
this.score.html(game.system.score)
.css({
"background-color": "rgba(222,135,229,0.8)",
"top": 0,
"left": "85%"
});
break;
case "loading":
this.score.css("z-index", -1);
this.layercontext.save();
this.layercontext.translate(game.CANVAS_W/2, game.CANVAS_H/2);
this.layercontext.drawImage(this.loading_img,
this.frame_x[this.currentframe/this.anim_speed], 0,
this.loading_img.width, this.loading_img.height,
-this.loading_img.width/2, -this.loading_img.height/2,
this.loading_img.width, this.loading_img.height
);
this.layercontext.restore();
this.currentframe++;
if(this.currentframe == this.frame_x.length*this.anim_speed){
this.currentframe = 0;
}
break;
default:
break;
}
};
this.resized = function(){
this.layercontext.fillStyle = "rgba(256,256,256,0.4)";
this.layercontext.strokeStyle = "rgba(0,82,156, 0.8)";
this.layercontext.lineWidth = 5;
};
this.play_clicked = function(event){
//init players
game.system.num_players = 0;
for(var i=0; i<this.plane_choices.length; i++){
if(this.plane_choices[i]){
game.system.players[game.system.num_players++] = new game.Player(i, game.system.layermanager);
}
}
//init missiles
switch (this.selected_difficulty){
case "Easy":
game.system.num_enemies = 15;
break;
case "Medium":
game.system.num_enemies = 30;
break;
case "Hard":
game.system.num_enemies = 60;
break;
case "Insane":
game.system.num_enemies = 120;
break;
default:
break;
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.enemies[i] = new game.Enemy("missile", game.system.layermanager);
}
if(game.system.num_players!=0 && game.system.num_enemies!=0){
//add all players and missiles to entities
game.system.entities.length = 0;
for(var i=0; i<game.system.num_players; i++){
game.system.entities[game.system.entities.length] = game.system.players[i];
game.system.players[i].state = "alive";
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.entities[game.system.entities.length] = game.system.enemies[i];
game.system.enemies[i].state = "setup";
}
}
//clear ui
$(".ui").css({
"z-index": -1
});
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
this.state = "loading";
event.preventDefault();
event.stopPropagation();
return false;
};
this.select_plane = function(event){
if(event.which == this.LEFT_CLICK){
var target = event.target;
for(var i=0; i<this.image_divs.length; i++){
if(target.id == this.image_divs[i].attr("id")){
this.plane_choices[i] = !this.plane_choices[i];
}
}
}
event.preventDefault();
event.stopPropagation();
return false;
};
this.select_difficulty = function(event){
if(event.which == this.LEFT_CLICK){
var target = $(event.target);
for(var i =0; i<this.difficulties.length; i++){
if(target[0].innerHTML == this.difficulties[i]){
this.selected_difficulty = this.difficulties[i];
this.difficulty_divs[i].css({
"background-color": "rgba(222,135,229,0.5)"
});
}else{
this.difficulty_divs[i].css({
"background-color": "rgba(0,82,156, 0)"
});
}
}
}
event.preventDefault();
event.stopPropagation();
return false;
};
this.enter_name = function(event){
this.playername = $("#name>form>input").blur().val();
event.preventDefault();
return false;
};
this.show_scores = function(event){
if(!this.displaying_score){
this.score_menu.css({
"height": "90%",
"width": "70%",
"left": "15%",
"background-color": "rgba(0,82,156,1.0)",
"padding-bottom": "10px"
});
this.minimize_score_button.css({
"display": "inline"
});
$.getJSON("/highscores", function(data){
var htmlStr = "<table style='width: 90%; text-align: left; margin-left: 20px'>";
//add table column headers
htmlStr += "<tr> <th style='width:80px'>Rank</th> <th>Name</th> <th>Score</th> </tr>";
for(var i=0; i<data.length; i++){
htmlStr += "<tr style='font-size:80%'> <td>" + (i+1) + "</td> <td>" + data[i].name + "</td> <td>" + data[i].score + "</td> </tr>";
}
htmlStr += "</table>";
$("#score_table").html(htmlStr);
});
}
this.displaying_score = true;
event.stopPropagation();
return false;
};
this.hide_scores = function(event){
this.score_menu.css({
"height": "",
"width": "",
"left": "",
"background-color": "",
"padding-bottom": ""
}).removeAttr('p');
this.minimize_score_button.css({
"display": ""
});
$("#score_table").html("");
this.displaying_score = false;
event.stopPropagation();
return false;
};
this.init(getLayerContext, state);
};
//add class to game Namespace
game.Interface = Interface;
})(jQuery); | }
};
this.update = function(){ | random_line_split |
ui.js | //-------------------------------------------------- //
/*
UI.js -
Handles the drawing of the main UI
*/
//---------------------------------------------------//
(function ($){
var Interface = function(getLayerContext, state){
//some constants
this.LAYER_ID = game.UI;
this.LEFT_CLICK = 1;
this.layercontext = null;
this.state = null; //game state
this.playername = null; //the player's name
this.score = null; //displays score during play time and high score otherwise
this.multiplier = null; //number of planes
this.difficulty = null; //determines amount of missiles at one point in time
this.play_button = null; //starts game when clicked
this.instructions = null; //displays instructions
this.score_menu = null; //displays all-time high scores when clicked
this.displaying_score = false;
this.minimize_score_button = null;
this.img_src = ["media/plane1_top.png", "media/plane2_top.png", "media/plane3_top.png"];
this.images = [];
this.image_divs = [];
this.plane_choices = [];
this.difficulty_divs = [];
this.difficulties = ["Easy", "Medium", "Hard", "Insane"];
this.selected_difficulty = null;
this.loading_img = null;
this.currentframe = 0;
this.frame_x = [];
this.anim_speed = 2;
this.init = function(getLayerContext, state){
this.layercontext = getLayerContext(this.LAYER_ID);
this.state = state;
$("#name").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
$("#name>form").on({
submit: this.enter_name.bind(this)
});
$("#name>form>input").mouseup(function(event){
$(this).focus();
});
this.score = $("#score").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.multiplier = $("#planes").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<3; i++){
this.images[i] = new Image();
this.images[i].src = this.img_src[i];
this.image_divs[i] = $("<div></div>",{
"id": "plane"+i,
"class": "ui"
}).css({
"opacity": 0,
"padding": "0px 0px 0px 0px",
"display": "inline",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.select_plane.bind(this)
}).appendTo(this.multiplier);
this.plane_choices[i] = false;
}
this.difficulty = $("#difficulty").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<4; i++){
this.difficulty_divs[i] = $("<div>"+this.difficulties[i]+"</div>")
.css({
"display": "inline",
"z-index": this.LAYER_ID+1,
"padding": "10px 10px 10px 10px"
}).on({
mouseup: this.select_difficulty.bind(this)
}).appendTo(this.difficulty);
}
this.play_button = $("#play").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.play_clicked.bind(this)
});
this.instructions = $("#instructions").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.score_menu = $("#all_scores").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.show_scores.bind(this)
});
this.minimize_score_button = $("#minimize").on({
mouseup: this.hide_scores.bind(this)
});
this.loading_img = new Image();
this.loading_img.src = "media/loading.png";
this.loading_img.width = this.loading_img.height = 128;
for(var i=0; i<19; i++){
this.frame_x[i] = i * this.loading_img.width;
}
};
this.update = function(){
switch(this.state){
case "starting":
$(".ui").css({
"z-index": this.LAYER_ID+1
});
break;
case "running":
break;
case "loading":
//check if images finish loading before starting game
var allLoaded = true;
for(var i=0; i<game.system.entities.length; i++){
allLoaded = allLoaded && game.system.entities[i].isLoaded;
}
if(!allLoaded) |
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
game.system.state = this.state = "running";
break;
default:
break;
}
};
this.draw = function(){
switch(this.state){
case "starting":
this.score.html("High Score: "+game.system.highscore)
.css({
"background-color": "",
"top": "",
"left": ""
});
this.layercontext.fillRect(0,0,game.CANVAS_W,game.CANVAS_H);
var x = this.multiplier.position().left + this.multiplier.outerWidth();
y = this.multiplier.position().top;
var img_width, img_x;
for(var i=0; i<this.images.length; i++){
img_width = this.multiplier.outerHeight()*this.images[i].width/this.images[i].height;
img_x = x + 30+ i*(img_width+20);
this.layercontext.drawImage(this.images[i],
img_x, y,
img_width, this.multiplier.outerHeight()
);
if(this.plane_choices[i]){
this.image_divs[i].css({
"opacity": 1,
"background-color": "rgba(222,135,229,0.5)",
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}else{
this.image_divs[i].css({
"opacity": 0,
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}
}
break;
case "running":
this.score.html(game.system.score)
.css({
"background-color": "rgba(222,135,229,0.8)",
"top": 0,
"left": "85%"
});
break;
case "loading":
this.score.css("z-index", -1);
this.layercontext.save();
this.layercontext.translate(game.CANVAS_W/2, game.CANVAS_H/2);
this.layercontext.drawImage(this.loading_img,
this.frame_x[this.currentframe/this.anim_speed], 0,
this.loading_img.width, this.loading_img.height,
-this.loading_img.width/2, -this.loading_img.height/2,
this.loading_img.width, this.loading_img.height
);
this.layercontext.restore();
this.currentframe++;
if(this.currentframe == this.frame_x.length*this.anim_speed){
this.currentframe = 0;
}
break;
default:
break;
}
};
this.resized = function(){
this.layercontext.fillStyle = "rgba(256,256,256,0.4)";
this.layercontext.strokeStyle = "rgba(0,82,156, 0.8)";
this.layercontext.lineWidth = 5;
};
this.play_clicked = function(event){
//init players
game.system.num_players = 0;
for(var i=0; i<this.plane_choices.length; i++){
if(this.plane_choices[i]){
game.system.players[game.system.num_players++] = new game.Player(i, game.system.layermanager);
}
}
//init missiles
switch (this.selected_difficulty){
case "Easy":
game.system.num_enemies = 15;
break;
case "Medium":
game.system.num_enemies = 30;
break;
case "Hard":
game.system.num_enemies = 60;
break;
case "Insane":
game.system.num_enemies = 120;
break;
default:
break;
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.enemies[i] = new game.Enemy("missile", game.system.layermanager);
}
if(game.system.num_players!=0 && game.system.num_enemies!=0){
//add all players and missiles to entities
game.system.entities.length = 0;
for(var i=0; i<game.system.num_players; i++){
game.system.entities[game.system.entities.length] = game.system.players[i];
game.system.players[i].state = "alive";
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.entities[game.system.entities.length] = game.system.enemies[i];
game.system.enemies[i].state = "setup";
}
}
//clear ui
$(".ui").css({
"z-index": -1
});
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
this.state = "loading";
event.preventDefault();
event.stopPropagation();
return false;
};
this.select_plane = function(event){
if(event.which == this.LEFT_CLICK){
var target = event.target;
for(var i=0; i<this.image_divs.length; i++){
if(target.id == this.image_divs[i].attr("id")){
this.plane_choices[i] = !this.plane_choices[i];
}
}
}
event.preventDefault();
event.stopPropagation();
return false;
};
this.select_difficulty = function(event){
if(event.which == this.LEFT_CLICK){
var target = $(event.target);
for(var i =0; i<this.difficulties.length; i++){
if(target[0].innerHTML == this.difficulties[i]){
this.selected_difficulty = this.difficulties[i];
this.difficulty_divs[i].css({
"background-color": "rgba(222,135,229,0.5)"
});
}else{
this.difficulty_divs[i].css({
"background-color": "rgba(0,82,156, 0)"
});
}
}
}
event.preventDefault();
event.stopPropagation();
return false;
};
this.enter_name = function(event){
this.playername = $("#name>form>input").blur().val();
event.preventDefault();
return false;
};
this.show_scores = function(event){
if(!this.displaying_score){
this.score_menu.css({
"height": "90%",
"width": "70%",
"left": "15%",
"background-color": "rgba(0,82,156,1.0)",
"padding-bottom": "10px"
});
this.minimize_score_button.css({
"display": "inline"
});
$.getJSON("/highscores", function(data){
var htmlStr = "<table style='width: 90%; text-align: left; margin-left: 20px'>";
//add table column headers
htmlStr += "<tr> <th style='width:80px'>Rank</th> <th>Name</th> <th>Score</th> </tr>";
for(var i=0; i<data.length; i++){
htmlStr += "<tr style='font-size:80%'> <td>" + (i+1) + "</td> <td>" + data[i].name + "</td> <td>" + data[i].score + "</td> </tr>";
}
htmlStr += "</table>";
$("#score_table").html(htmlStr);
});
}
this.displaying_score = true;
event.stopPropagation();
return false;
};
this.hide_scores = function(event){
this.score_menu.css({
"height": "",
"width": "",
"left": "",
"background-color": "",
"padding-bottom": ""
}).removeAttr('p');
this.minimize_score_button.css({
"display": ""
});
$("#score_table").html("");
this.displaying_score = false;
event.stopPropagation();
return false;
};
this.init(getLayerContext, state);
};
//add class to game Namespace
game.Interface = Interface;
})(jQuery); | {
break;
} | conditional_block |
input.go | package termui
import (
"github.com/nsf/termbox-go"
"strconv"
"strings"
)
// default mappings between /sys/kbd events and multi-line inputs
var multiLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "\n",
"<escape>": "",
}
// default mappings between /sys/kbd events and single line inputs
var singleLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "",
"<escape>": "",
}
const NEW_LINE = "\n"
const LINE_NO_MIN_SPACE = 1000
// EvtInput defines the structure for the /input/* events. The event contains the last keystroke, the full text
// for the current line, and the position of the cursor in the current line as well as the index of the current
// line in the full text of the input
type EvtInput struct {
KeyStr string
LineText string
CursorPosition int
LineIndex int
}
// Input is the main object for a text input. The object exposes the following public properties:
// TextFgColor: color for the text.
// TextBgColor: background color for the text box.
// IsCapturing: true if the input is currently capturing keyboard events, this is controlled by the StartCapture and
// StopCapture methods.
// IsMultiline: Whether we should accept multiple lines of input or this is a singe line form field.
// TextBuilder: An implementation of the TextBuilder interface to customize the look of the text on the screen.
// SpecialChars: a map[string]string of characters from the /sys/kbd events to actual strings in the content.
// Name: When specified, the Input uses its name to propagate events, for example /input/<name>/kbd.
type Input struct {
Block
TextFgColor Attribute
TextBgColor Attribute
IsCapturing bool
IsMultiLine bool
TextBuilder TextBuilder
SpecialChars map[string]string
ShowLineNo bool
Name string
CursorX int
CursorY int
//DebugMode bool
//debugMessage string
// internal vars
lines []string
cursorLineIndex int
cursorLinePos int
}
// NewInput returns a new, initialized Input object. The method receives the initial content for the input (if any)
// and whether it should be initialized as a multi-line innput field or not
func NewInput(s string, isMultiLine bool) *Input {
textArea := &Input{
Block: *NewBlock(),
TextFgColor: ThemeAttr("par.text.fg"),
TextBgColor: ThemeAttr("par.text.bg"),
TextBuilder: NewMarkdownTxBuilder(),
IsMultiLine: isMultiLine,
ShowLineNo: false,
cursorLineIndex: 0,
cursorLinePos: 0,
}
if s != "" {
textArea.SetText(s)
}
if isMultiLine {
textArea.SpecialChars = multiLineCharMap
} else {
textArea.SpecialChars = singleLineCharMap
}
return textArea
}
// StartCapture begins catching events from the /sys/kbd stream and updates the content of the Input field. While
// capturing events, the Input field also publishes its own event stream under the /input/kbd path.
func (i *Input) StartCapture() {
i.IsCapturing = true
Handle("/sys/kbd", func(e Event) {
if i.IsCapturing {
key := e.Data.(EvtKbd).KeyStr
switch key {
case "<up>":
i.moveUp()
case "<down>":
i.moveDown()
case "<left>":
i.moveLeft()
case "<right>":
i.moveRight()
case "C-8":
i.backspace()
default:
// If it's a CTRL something we don't handle then just ignore it
if strings.HasPrefix(key, "C-") {
break
}
newString := i.getCharString(key)
i.addString(newString)
}
if i.Name == "" {
SendCustomEvt("/input/kbd", i.getInputEvt(key))
} else {
SendCustomEvt("/input/" + i.Name + "/kbd", i.getInputEvt(key))
}
Render(i)
}
})
}
// StopCapture tells the Input field to stop accepting events from the /sys/kbd stream
func (i *Input) StopCapture() {
i.IsCapturing = false
}
// Text returns the text of the input field as a string
func (i *Input) Text() string {
if len(i.lines) == 0 {
return ""
}
if len(i.lines) == 1 {
return i.lines[0]
}
if i.IsMultiLine {
return strings.Join(i.lines, NEW_LINE)
} else {
// we should never get here!
return i.lines[0]
}
}
func (i *Input) SetText(text string) {
i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field. By default lines are separated by \n
func (i *Input) Lines() []string {
return i.lines
}
// Private methods for the input field
// TODO: handle delete key
func (i *Input) backspace() {
curLine := i.lines[i.cursorLineIndex]
// at the beginning of the buffer, nothing to do
if len(curLine) == 0 && i.cursorLineIndex == 0 {
return
}
// at the beginning of a line somewhere in the buffer
if i.cursorLinePos == 0 {
prevLine := i.lines[i.cursorLineIndex-1]
// remove the newline character from the prevline
prevLine = prevLine[:len(curLine)-1] + curLine
i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
return
}
// I'm at the end of a line
if i.cursorLinePos == len(curLine)-1 {
i.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]
i.cursorLinePos--
return
}
// I'm in the middle of a line
i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) {
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
}
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
func (i *Input) moveUp() {
// if we are already on the first line then just move the cursor to the beginning
if i.cursorLineIndex == 0 {
i.cursorLinePos = 0
return
}
// The previous line is just as long, we can move to the same position in the line
prevLine := i.lines[i.cursorLineIndex-1]
if len(prevLine) >= i.cursorLinePos {
i.cursorLineIndex--
} else {
// otherwise we move the cursor to the end of the previous line
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
}
}
func (i *Input) moveDown() {
// we are already on the last line, we just need to move the position to the end of the line
if i.cursorLineIndex == len(i.lines)-1 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
return
}
// check if the cursor can move to the same position in the next line, otherwise move it to the end
nextLine := i.lines[i.cursorLineIndex+1]
if len(nextLine) >= i.cursorLinePos {
i.cursorLineIndex++
} else {
i.cursorLineIndex++
i.cursorLinePos = len(nextLine) - 1
}
}
func (i *Input) moveLeft() |
func (i *Input) moveRight() {
// if we are at the end of the line move to the next
if i.cursorLinePos >= len(i.lines[i.cursorLineIndex]) {
origLine := i.cursorLineIndex
i.moveDown()
if origLine < len(i.lines)-1 {
i.cursorLinePos = 0
}
return
}
i.cursorLinePos++
}
// Buffer implements Bufferer interface.
func (i *Input) Buffer() Buffer {
buf := i.Block.Buffer()
// offset used to display the line numbers
textXOffset := 0
bufferLines := i.lines[:]
firstLine := 0
lastLine := i.innerArea.Dy()
if i.IsMultiLine {
if i.cursorLineIndex >= lastLine {
firstLine += i.cursorLineIndex - lastLine + 1
lastLine += i.cursorLineIndex - lastLine + 1
}
if len(i.lines) < lastLine {
bufferLines = i.lines[firstLine:]
} else {
bufferLines = i.lines[firstLine:lastLine]
}
}
if i.ShowLineNo {
// forcing space for up to 1K
if lastLine < LINE_NO_MIN_SPACE {
textXOffset = len(strconv.Itoa(LINE_NO_MIN_SPACE)) + 2
} else {
textXOffset = len(strconv.Itoa(lastLine)) + 2 // one space at the beginning and one at the end
}
}
text := strings.Join(bufferLines, NEW_LINE)
// if the last line is empty then we add a fake space to make sure line numbers are displayed
if len(bufferLines) > 0 && bufferLines[len(bufferLines)-1] == "" && i.ShowLineNo {
text += " "
}
fg, bg := i.TextFgColor, i.TextBgColor
cs := i.TextBuilder.Build(text, fg, bg)
y, x, n := 0, 0, 0
lineNoCnt := 1
for n < len(cs) {
w := cs[n].Width()
if x == 0 && i.ShowLineNo {
curLineNoString := " " + strconv.Itoa(lineNoCnt) +
strings.Join(make([]string, textXOffset-len(strconv.Itoa(lineNoCnt))-1), " ")
//i.debugMessage = "Line no: " + curLineNoString
curLineNoRunes := i.TextBuilder.Build(curLineNoString, fg, bg)
for lineNo := 0; lineNo < len(curLineNoRunes); lineNo++ {
buf.Set(i.innerArea.Min.X+x+lineNo, i.innerArea.Min.Y+y, curLineNoRunes[lineNo])
}
lineNoCnt++
}
if cs[n].Ch == '\n' {
y++
n++
x = 0 // set x = 0
continue
}
buf.Set(i.innerArea.Min.X+x+textXOffset, i.innerArea.Min.Y+y, cs[n])
n++
x += w
}
cursorXOffset := i.X + textXOffset
if i.BorderLeft {
cursorXOffset++
}
cursorYOffset := i.Y// termui.TermHeight() - i.innerArea.Dy()
if i.BorderTop {
cursorYOffset++
}
if lastLine > i.innerArea.Dy() {
cursorYOffset += i.innerArea.Dy() - 1
} else {
cursorYOffset += i.cursorLineIndex
}
if i.IsCapturing {
i.CursorX = i.cursorLinePos+cursorXOffset
i.CursorY = cursorYOffset
termbox.SetCursor(i.cursorLinePos+cursorXOffset, cursorYOffset)
}
/*
if i.DebugMode {
position := fmt.Sprintf("%s li: %d lp: %d n: %d", i.debugMessage, i.cursorLineIndex, i.cursorLinePos, len(i.lines))
for idx, char := range position {
buf.Set(i.innerArea.Min.X+i.innerArea.Dx()-len(position) + idx,
i.innerArea.Min.Y+i.innerArea.Dy()-1,
Cell{Ch: char, Fg: i.TextFgColor, Bg: i.TextBgColor})
}
}
*/
return buf
}
func (i *Input) getCharString(s string) string {
if val, ok := i.SpecialChars[s]; ok {
return val
} else {
return s
}
}
func (i *Input) getInputEvt(key string) EvtInput {
return EvtInput{
KeyStr: key,
LineText: i.lines[i.cursorLineIndex],
CursorPosition: i.cursorLinePos,
LineIndex: i.cursorLineIndex,
}
}
| {
// if we are at the beginning of the line move the cursor to the previous line
if i.cursorLinePos == 0 {
origLine := i.cursorLineIndex
i.moveUp()
if origLine > 0 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
}
return
}
i.cursorLinePos--
} | identifier_body |
input.go | package termui
import (
"github.com/nsf/termbox-go"
"strconv"
"strings"
)
// default mappings between /sys/kbd events and multi-line inputs
var multiLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "\n",
"<escape>": "",
}
// default mappings between /sys/kbd events and single line inputs
var singleLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "",
"<escape>": "",
}
const NEW_LINE = "\n"
const LINE_NO_MIN_SPACE = 1000
// EvtInput defines the structure for the /input/* events. The event contains the last keystroke, the full text
// for the current line, and the position of the cursor in the current line as well as the index of the current
// line in the full text of the input
type EvtInput struct {
KeyStr string
LineText string
CursorPosition int
LineIndex int
}
// Input is the main object for a text input. The object exposes the following public properties:
// TextFgColor: color for the text.
// TextBgColor: background color for the text box.
// IsCapturing: true if the input is currently capturing keyboard events, this is controlled by the StartCapture and
// StopCapture methods.
// IsMultiline: Whether we should accept multiple lines of input or this is a singe line form field.
// TextBuilder: An implementation of the TextBuilder interface to customize the look of the text on the screen.
// SpecialChars: a map[string]string of characters from the /sys/kbd events to actual strings in the content.
// Name: When specified, the Input uses its name to propagate events, for example /input/<name>/kbd.
type Input struct {
Block
TextFgColor Attribute
TextBgColor Attribute
IsCapturing bool
IsMultiLine bool
TextBuilder TextBuilder
SpecialChars map[string]string
ShowLineNo bool
Name string
CursorX int
CursorY int
//DebugMode bool
//debugMessage string
// internal vars
lines []string
cursorLineIndex int
cursorLinePos int
}
// NewInput returns a new, initialized Input object. The method receives the initial content for the input (if any)
// and whether it should be initialized as a multi-line innput field or not
func NewInput(s string, isMultiLine bool) *Input {
textArea := &Input{
Block: *NewBlock(),
TextFgColor: ThemeAttr("par.text.fg"),
TextBgColor: ThemeAttr("par.text.bg"),
TextBuilder: NewMarkdownTxBuilder(),
IsMultiLine: isMultiLine,
ShowLineNo: false,
cursorLineIndex: 0,
cursorLinePos: 0,
}
if s != "" {
textArea.SetText(s)
}
if isMultiLine {
textArea.SpecialChars = multiLineCharMap
} else {
textArea.SpecialChars = singleLineCharMap
}
return textArea
}
// StartCapture begins catching events from the /sys/kbd stream and updates the content of the Input field. While
// capturing events, the Input field also publishes its own event stream under the /input/kbd path.
func (i *Input) StartCapture() {
i.IsCapturing = true
Handle("/sys/kbd", func(e Event) {
if i.IsCapturing {
key := e.Data.(EvtKbd).KeyStr
switch key {
case "<up>":
i.moveUp()
case "<down>":
i.moveDown()
case "<left>":
i.moveLeft()
case "<right>":
i.moveRight()
case "C-8":
i.backspace()
default:
// If it's a CTRL something we don't handle then just ignore it
if strings.HasPrefix(key, "C-") {
break
}
newString := i.getCharString(key)
i.addString(newString)
}
if i.Name == "" {
SendCustomEvt("/input/kbd", i.getInputEvt(key))
} else {
SendCustomEvt("/input/" + i.Name + "/kbd", i.getInputEvt(key))
}
Render(i)
}
})
}
// StopCapture tells the Input field to stop accepting events from the /sys/kbd stream.
// The handler registered by StartCapture stays in place; it simply ignores
// incoming events while IsCapturing is false.
func (i *Input) StopCapture() {
	i.IsCapturing = false
}
// Text returns the full content of the input field as a single string.
// Multi-line inputs are joined with NEW_LINE; single-line inputs always
// yield their single line.
func (i *Input) Text() string {
	switch {
	case len(i.lines) == 0:
		return ""
	case len(i.lines) > 1 && i.IsMultiLine:
		return strings.Join(i.lines, NEW_LINE)
	default:
		// exactly one line, or (unexpectedly) several lines on a
		// single-line field — return the first line either way
		return i.lines[0]
	}
}
// SetText replaces the entire content of the input field, splitting the
// given text into lines on NEW_LINE. The cursor fields are not modified.
func (i *Input) SetText(text string) {
	i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field,
// one entry per line. By default lines are separated by \n. The returned
// slice is the internal backing slice, not a copy.
func (i *Input) Lines() []string {
	return i.lines
}
// Private methods for the input field
// TODO: handle delete key
// backspace deletes the character immediately before the cursor. When the
// cursor is at column zero, the current line is merged into the previous
// one and removed from the buffer.
// TODO: handle delete key
func (i *Input) backspace() {
	curLine := i.lines[i.cursorLineIndex]
	// at the very beginning of the buffer there is nothing to delete;
	// checking the position (not the line length) also protects the
	// merge branch below from indexing lines[-1] on a non-empty first line
	if i.cursorLineIndex == 0 && i.cursorLinePos == 0 {
		return
	}
	// at the beginning of a line somewhere in the buffer: join this line
	// onto the previous one and drop it
	if i.cursorLinePos == 0 {
		prevLine := i.lines[i.cursorLineIndex-1]
		// the merged line must be stored back (the original discarded it),
		// and no trimming is needed: lines never contain the newline
		// character, so slicing by len(curLine) was wrong and could panic
		i.lines[i.cursorLineIndex-1] = prevLine + curLine
		i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
		i.cursorLineIndex--
		// cursor lands on the junction point, right after the old prevLine
		i.cursorLinePos = len(prevLine)
		return
	}
	// anywhere inside or at the end of a line: remove the byte before the
	// cursor; this single expression covers both cases because
	// curLine[i.cursorLinePos:] is empty at end of line
	i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
	i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) |
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
// moveUp moves the cursor one line up, keeping the column when the previous
// line is long enough. On the first line it jumps to column zero instead.
func (i *Input) moveUp() {
	// if we are already on the first line then just move the cursor to the beginning
	if i.cursorLineIndex == 0 {
		i.cursorLinePos = 0
		return
	}
	// The previous line is just as long, we can move to the same position in the line
	prevLine := i.lines[i.cursorLineIndex-1]
	if len(prevLine) >= i.cursorLinePos {
		i.cursorLineIndex--
	} else {
		// otherwise we move the cursor to the end of the previous line
		i.cursorLineIndex--
		// NOTE(review): this lands on the last character (len-1) while
		// moveDown's last-line case uses len(line) — confirm which
		// "end of line" convention is intended
		i.cursorLinePos = len(prevLine) - 1
	}
}
func (i *Input) moveDown() {
// we are already on the last line, we just need to move the position to the end of the line
if i.cursorLineIndex == len(i.lines)-1 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
return
}
// check if the cursor can move to the same position in the next line, otherwise move it to the end
nextLine := i.lines[i.cursorLineIndex+1]
if len(nextLine) >= i.cursorLinePos {
i.cursorLineIndex++
} else {
i.cursorLineIndex++
i.cursorLinePos = len(nextLine) - 1
}
}
// moveLeft shifts the cursor one position to the left, wrapping to the end
// of the previous line when it is already at column zero.
func (i *Input) moveLeft() {
	if i.cursorLinePos > 0 {
		i.cursorLinePos--
		return
	}
	// at column zero: hop to the previous line; moveUp clamps on line 0,
	// in which case the column must stay at zero
	startLine := i.cursorLineIndex
	i.moveUp()
	if startLine > 0 {
		i.cursorLinePos = len(i.lines[i.cursorLineIndex])
	}
}
func (i *Input) moveRight() {
// if we are at the end of the line move to the next
if i.cursorLinePos >= len(i.lines[i.cursorLineIndex]) {
origLine := i.cursorLineIndex
i.moveDown()
if origLine < len(i.lines)-1 {
i.cursorLinePos = 0
}
return
}
i.cursorLinePos++
}
// Buffer implements Bufferer interface.
func (i *Input) Buffer() Buffer {
buf := i.Block.Buffer()
// offset used to display the line numbers
textXOffset := 0
bufferLines := i.lines[:]
firstLine := 0
lastLine := i.innerArea.Dy()
if i.IsMultiLine {
if i.cursorLineIndex >= lastLine {
firstLine += i.cursorLineIndex - lastLine + 1
lastLine += i.cursorLineIndex - lastLine + 1
}
if len(i.lines) < lastLine {
bufferLines = i.lines[firstLine:]
} else {
bufferLines = i.lines[firstLine:lastLine]
}
}
if i.ShowLineNo {
// forcing space for up to 1K
if lastLine < LINE_NO_MIN_SPACE {
textXOffset = len(strconv.Itoa(LINE_NO_MIN_SPACE)) + 2
} else {
textXOffset = len(strconv.Itoa(lastLine)) + 2 // one space at the beginning and one at the end
}
}
text := strings.Join(bufferLines, NEW_LINE)
// if the last line is empty then we add a fake space to make sure line numbers are displayed
if len(bufferLines) > 0 && bufferLines[len(bufferLines)-1] == "" && i.ShowLineNo {
text += " "
}
fg, bg := i.TextFgColor, i.TextBgColor
cs := i.TextBuilder.Build(text, fg, bg)
y, x, n := 0, 0, 0
lineNoCnt := 1
for n < len(cs) {
w := cs[n].Width()
if x == 0 && i.ShowLineNo {
curLineNoString := " " + strconv.Itoa(lineNoCnt) +
strings.Join(make([]string, textXOffset-len(strconv.Itoa(lineNoCnt))-1), " ")
//i.debugMessage = "Line no: " + curLineNoString
curLineNoRunes := i.TextBuilder.Build(curLineNoString, fg, bg)
for lineNo := 0; lineNo < len(curLineNoRunes); lineNo++ {
buf.Set(i.innerArea.Min.X+x+lineNo, i.innerArea.Min.Y+y, curLineNoRunes[lineNo])
}
lineNoCnt++
}
if cs[n].Ch == '\n' {
y++
n++
x = 0 // set x = 0
continue
}
buf.Set(i.innerArea.Min.X+x+textXOffset, i.innerArea.Min.Y+y, cs[n])
n++
x += w
}
cursorXOffset := i.X + textXOffset
if i.BorderLeft {
cursorXOffset++
}
cursorYOffset := i.Y// termui.TermHeight() - i.innerArea.Dy()
if i.BorderTop {
cursorYOffset++
}
if lastLine > i.innerArea.Dy() {
cursorYOffset += i.innerArea.Dy() - 1
} else {
cursorYOffset += i.cursorLineIndex
}
if i.IsCapturing {
i.CursorX = i.cursorLinePos+cursorXOffset
i.CursorY = cursorYOffset
termbox.SetCursor(i.cursorLinePos+cursorXOffset, cursorYOffset)
}
/*
if i.DebugMode {
position := fmt.Sprintf("%s li: %d lp: %d n: %d", i.debugMessage, i.cursorLineIndex, i.cursorLinePos, len(i.lines))
for idx, char := range position {
buf.Set(i.innerArea.Min.X+i.innerArea.Dx()-len(position) + idx,
i.innerArea.Min.Y+i.innerArea.Dy()-1,
Cell{Ch: char, Fg: i.TextFgColor, Bg: i.TextBgColor})
}
}
*/
return buf
}
// getCharString translates a /sys/kbd key token into the text it should
// insert, falling back to the token itself when no mapping exists.
func (i *Input) getCharString(s string) string {
	mapped, known := i.SpecialChars[s]
	if !known {
		return s
	}
	return mapped
}
func (i *Input) getInputEvt(key string) EvtInput {
return EvtInput{
KeyStr: key,
LineText: i.lines[i.cursorLineIndex],
CursorPosition: i.cursorLinePos,
LineIndex: i.cursorLineIndex,
}
}
| {
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
} | conditional_block |
input.go | package termui
import (
"github.com/nsf/termbox-go"
"strconv"
"strings"
)
// default mappings between /sys/kbd events and multi-line inputs
var multiLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "\n",
"<escape>": "",
}
// default mappings between /sys/kbd events and single line inputs
var singleLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "",
"<escape>": "",
}
const NEW_LINE = "\n"
const LINE_NO_MIN_SPACE = 1000
// EvtInput defines the structure for the /input/* events. The event contains the last keystroke, the full text
// for the current line, and the position of the cursor in the current line as well as the index of the current
// line in the full text of the input
type EvtInput struct {
KeyStr string
LineText string
CursorPosition int
LineIndex int
}
// Input is the main object for a text input. The object exposes the following public properties:
// TextFgColor: color for the text.
// TextBgColor: background color for the text box.
// IsCapturing: true if the input is currently capturing keyboard events, this is controlled by the StartCapture and
// StopCapture methods.
// IsMultiline: Whether we should accept multiple lines of input or this is a singe line form field.
// TextBuilder: An implementation of the TextBuilder interface to customize the look of the text on the screen.
// SpecialChars: a map[string]string of characters from the /sys/kbd events to actual strings in the content.
// Name: When specified, the Input uses its name to propagate events, for example /input/<name>/kbd.
type Input struct {
Block
TextFgColor Attribute
TextBgColor Attribute
IsCapturing bool
IsMultiLine bool
TextBuilder TextBuilder
SpecialChars map[string]string
ShowLineNo bool
Name string
CursorX int
CursorY int
//DebugMode bool
//debugMessage string
// internal vars
lines []string
cursorLineIndex int
cursorLinePos int
}
// NewInput returns a new, initialized Input object. The method receives the initial content for the input (if any)
// and whether it should be initialized as a multi-line innput field or not
func NewInput(s string, isMultiLine bool) *Input {
textArea := &Input{
Block: *NewBlock(),
TextFgColor: ThemeAttr("par.text.fg"),
TextBgColor: ThemeAttr("par.text.bg"),
TextBuilder: NewMarkdownTxBuilder(),
IsMultiLine: isMultiLine,
ShowLineNo: false,
cursorLineIndex: 0,
cursorLinePos: 0,
}
if s != "" {
textArea.SetText(s)
}
if isMultiLine {
textArea.SpecialChars = multiLineCharMap
} else {
textArea.SpecialChars = singleLineCharMap
}
return textArea
}
// StartCapture begins catching events from the /sys/kbd stream and updates the content of the Input field. While
// capturing events, the Input field also publishes its own event stream under the /input/kbd path.
func (i *Input) StartCapture() {
i.IsCapturing = true
Handle("/sys/kbd", func(e Event) {
if i.IsCapturing {
key := e.Data.(EvtKbd).KeyStr
switch key {
case "<up>":
i.moveUp()
case "<down>":
i.moveDown()
case "<left>":
i.moveLeft()
case "<right>":
i.moveRight()
case "C-8":
i.backspace()
default:
// If it's a CTRL something we don't handle then just ignore it
if strings.HasPrefix(key, "C-") {
break
}
newString := i.getCharString(key)
i.addString(newString)
}
if i.Name == "" {
SendCustomEvt("/input/kbd", i.getInputEvt(key))
} else {
SendCustomEvt("/input/" + i.Name + "/kbd", i.getInputEvt(key))
}
Render(i)
}
})
}
// StopCapture tells the Input field to stop accepting events from the /sys/kbd stream
func (i *Input) StopCapture() {
i.IsCapturing = false
}
// Text returns the text of the input field as a string.
// Multi-line inputs are joined with NEW_LINE; single-line inputs always
// yield their one line.
func (i *Input) Text() string {
	// no content yet
	if len(i.lines) == 0 {
		return ""
	}
	// a single line needs no join regardless of mode
	if len(i.lines) == 1 {
		return i.lines[0]
	}
	if i.IsMultiLine {
		return strings.Join(i.lines, NEW_LINE)
	} else {
		// we should never get here! (a single-line field holding several
		// lines) — fall back to the first line
		return i.lines[0]
	}
}
func (i *Input) SetText(text string) {
i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field. By default lines are separated by \n
func (i *Input) Lines() []string {
return i.lines
}
// Private methods for the input field
// TODO: handle delete key
// backspace removes the character before the cursor, merging the current
// line into the previous one when the cursor is at column zero.
func (i *Input) backspace() {
	curLine := i.lines[i.cursorLineIndex]
	// at the beginning of the buffer, nothing to do
	if len(curLine) == 0 && i.cursorLineIndex == 0 {
		// NOTE(review): a non-empty first line with the cursor at column 0
		// slips past this guard and indexes lines[-1] below — confirm
		return
	}
	// at the beginning of a line somewhere in the buffer
	if i.cursorLinePos == 0 {
		prevLine := i.lines[i.cursorLineIndex-1]
		// remove the newline character from the prevline
		// NOTE(review): the slice bound uses len(curLine) rather than
		// len(prevLine), and the merged string is assigned only to the
		// local prevLine, never written back into i.lines — looks like a
		// bug; verify intended behavior
		prevLine = prevLine[:len(curLine)-1] + curLine
		i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
		i.cursorLineIndex--
		i.cursorLinePos = len(prevLine) - 1
		return
	}
	// I'm at the end of a line
	if i.cursorLinePos == len(curLine)-1 {
		i.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]
		i.cursorLinePos--
		return
	}
	// I'm in the middle of a line: drop the byte before the cursor
	i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
	i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) {
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
}
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
func (i *Input) moveUp() {
// if we are already on the first line then just move the cursor to the beginning
if i.cursorLineIndex == 0 {
i.cursorLinePos = 0
return
}
// The previous line is just as long, we can move to the same position in the line
prevLine := i.lines[i.cursorLineIndex-1]
if len(prevLine) >= i.cursorLinePos {
i.cursorLineIndex--
} else {
// otherwise we move the cursor to the end of the previous line
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
}
}
// moveDown advances the cursor one line, clamping the column to the target
// line's length; on the last line it jumps to the end of that line instead.
func (i *Input) moveDown() {
	last := len(i.lines) - 1
	if i.cursorLineIndex == last {
		// already on the last line: snap the cursor to the end of it
		i.cursorLinePos = len(i.lines[i.cursorLineIndex])
		return
	}
	target := i.lines[i.cursorLineIndex+1]
	i.cursorLineIndex++
	if len(target) < i.cursorLinePos {
		// the next line is shorter: keep the cursor inside it
		i.cursorLinePos = len(target) - 1
	}
}
func (i *Input) moveLeft() {
// if we are at the beginning of the line move the cursor to the previous line
if i.cursorLinePos == 0 {
origLine := i.cursorLineIndex
i.moveUp()
if origLine > 0 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
}
return
}
i.cursorLinePos--
}
// moveRight shifts the cursor one position to the right, wrapping to the
// start of the next line when it is already past the end of the current one.
func (i *Input) moveRight() {
	if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) {
		i.cursorLinePos++
		return
	}
	// at (or beyond) end of line: hop down; moveDown clamps on the last
	// line, in which case the column must not be reset
	startLine := i.cursorLineIndex
	i.moveDown()
	if startLine < len(i.lines)-1 {
		i.cursorLinePos = 0
	}
}
// Buffer implements Bufferer interface.
func (i *Input) Buffer() Buffer {
buf := i.Block.Buffer()
// offset used to display the line numbers
textXOffset := 0
bufferLines := i.lines[:]
firstLine := 0
lastLine := i.innerArea.Dy()
if i.IsMultiLine {
if i.cursorLineIndex >= lastLine {
firstLine += i.cursorLineIndex - lastLine + 1
lastLine += i.cursorLineIndex - lastLine + 1
}
if len(i.lines) < lastLine {
bufferLines = i.lines[firstLine:]
} else {
bufferLines = i.lines[firstLine:lastLine]
}
}
if i.ShowLineNo {
// forcing space for up to 1K
if lastLine < LINE_NO_MIN_SPACE {
textXOffset = len(strconv.Itoa(LINE_NO_MIN_SPACE)) + 2
} else {
textXOffset = len(strconv.Itoa(lastLine)) + 2 // one space at the beginning and one at the end
}
}
text := strings.Join(bufferLines, NEW_LINE)
// if the last line is empty then we add a fake space to make sure line numbers are displayed
if len(bufferLines) > 0 && bufferLines[len(bufferLines)-1] == "" && i.ShowLineNo {
text += " "
}
fg, bg := i.TextFgColor, i.TextBgColor
cs := i.TextBuilder.Build(text, fg, bg)
y, x, n := 0, 0, 0
lineNoCnt := 1
for n < len(cs) {
w := cs[n].Width()
if x == 0 && i.ShowLineNo {
curLineNoString := " " + strconv.Itoa(lineNoCnt) +
strings.Join(make([]string, textXOffset-len(strconv.Itoa(lineNoCnt))-1), " ")
//i.debugMessage = "Line no: " + curLineNoString
curLineNoRunes := i.TextBuilder.Build(curLineNoString, fg, bg)
for lineNo := 0; lineNo < len(curLineNoRunes); lineNo++ {
buf.Set(i.innerArea.Min.X+x+lineNo, i.innerArea.Min.Y+y, curLineNoRunes[lineNo])
}
lineNoCnt++
}
if cs[n].Ch == '\n' {
y++
n++
x = 0 // set x = 0
continue
}
buf.Set(i.innerArea.Min.X+x+textXOffset, i.innerArea.Min.Y+y, cs[n])
n++
x += w
}
cursorXOffset := i.X + textXOffset
if i.BorderLeft {
cursorXOffset++
}
cursorYOffset := i.Y// termui.TermHeight() - i.innerArea.Dy()
if i.BorderTop {
cursorYOffset++
}
if lastLine > i.innerArea.Dy() {
cursorYOffset += i.innerArea.Dy() - 1
} else { | termbox.SetCursor(i.cursorLinePos+cursorXOffset, cursorYOffset)
}
/*
if i.DebugMode {
position := fmt.Sprintf("%s li: %d lp: %d n: %d", i.debugMessage, i.cursorLineIndex, i.cursorLinePos, len(i.lines))
for idx, char := range position {
buf.Set(i.innerArea.Min.X+i.innerArea.Dx()-len(position) + idx,
i.innerArea.Min.Y+i.innerArea.Dy()-1,
Cell{Ch: char, Fg: i.TextFgColor, Bg: i.TextBgColor})
}
}
*/
return buf
}
func (i *Input) getCharString(s string) string {
if val, ok := i.SpecialChars[s]; ok {
return val
} else {
return s
}
}
func (i *Input) getInputEvt(key string) EvtInput {
return EvtInput{
KeyStr: key,
LineText: i.lines[i.cursorLineIndex],
CursorPosition: i.cursorLinePos,
LineIndex: i.cursorLineIndex,
}
} | cursorYOffset += i.cursorLineIndex
}
if i.IsCapturing {
i.CursorX = i.cursorLinePos+cursorXOffset
i.CursorY = cursorYOffset | random_line_split |
input.go | package termui
import (
"github.com/nsf/termbox-go"
"strconv"
"strings"
)
// default mappings between /sys/kbd events and multi-line inputs
var multiLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "\n",
"<escape>": "",
}
// default mappings between /sys/kbd events and single line inputs
var singleLineCharMap = map[string]string{
"<space>": " ",
"<tab>": "\t",
"<enter>": "",
"<escape>": "",
}
const NEW_LINE = "\n"
const LINE_NO_MIN_SPACE = 1000
// EvtInput defines the structure for the /input/* events. The event contains the last keystroke, the full text
// for the current line, and the position of the cursor in the current line as well as the index of the current
// line in the full text of the input
type EvtInput struct {
KeyStr string
LineText string
CursorPosition int
LineIndex int
}
// Input is the main object for a text input. The object exposes the following public properties:
// TextFgColor: color for the text.
// TextBgColor: background color for the text box.
// IsCapturing: true if the input is currently capturing keyboard events, this is controlled by the StartCapture and
// StopCapture methods.
// IsMultiline: Whether we should accept multiple lines of input or this is a singe line form field.
// TextBuilder: An implementation of the TextBuilder interface to customize the look of the text on the screen.
// SpecialChars: a map[string]string of characters from the /sys/kbd events to actual strings in the content.
// Name: When specified, the Input uses its name to propagate events, for example /input/<name>/kbd.
type Input struct {
Block
TextFgColor Attribute
TextBgColor Attribute
IsCapturing bool
IsMultiLine bool
TextBuilder TextBuilder
SpecialChars map[string]string
ShowLineNo bool
Name string
CursorX int
CursorY int
//DebugMode bool
//debugMessage string
// internal vars
lines []string
cursorLineIndex int
cursorLinePos int
}
// NewInput returns a new, initialized Input object. The method receives the initial content for the input (if any)
// and whether it should be initialized as a multi-line innput field or not
func NewInput(s string, isMultiLine bool) *Input {
textArea := &Input{
Block: *NewBlock(),
TextFgColor: ThemeAttr("par.text.fg"),
TextBgColor: ThemeAttr("par.text.bg"),
TextBuilder: NewMarkdownTxBuilder(),
IsMultiLine: isMultiLine,
ShowLineNo: false,
cursorLineIndex: 0,
cursorLinePos: 0,
}
if s != "" {
textArea.SetText(s)
}
if isMultiLine {
textArea.SpecialChars = multiLineCharMap
} else {
textArea.SpecialChars = singleLineCharMap
}
return textArea
}
// StartCapture begins catching events from the /sys/kbd stream and updates the content of the Input field. While
// capturing events, the Input field also publishes its own event stream under the /input/kbd path.
func (i *Input) StartCapture() {
i.IsCapturing = true
Handle("/sys/kbd", func(e Event) {
if i.IsCapturing {
key := e.Data.(EvtKbd).KeyStr
switch key {
case "<up>":
i.moveUp()
case "<down>":
i.moveDown()
case "<left>":
i.moveLeft()
case "<right>":
i.moveRight()
case "C-8":
i.backspace()
default:
// If it's a CTRL something we don't handle then just ignore it
if strings.HasPrefix(key, "C-") {
break
}
newString := i.getCharString(key)
i.addString(newString)
}
if i.Name == "" {
SendCustomEvt("/input/kbd", i.getInputEvt(key))
} else {
SendCustomEvt("/input/" + i.Name + "/kbd", i.getInputEvt(key))
}
Render(i)
}
})
}
// StopCapture tells the Input field to stop accepting events from the /sys/kbd stream
func (i *Input) StopCapture() {
i.IsCapturing = false
}
// Text returns the content of the input field as one string; multi-line
// fields are joined with NEW_LINE, single-line fields yield their one line.
func (i *Input) Text() string {
	n := len(i.lines)
	if n == 0 {
		return ""
	}
	if n > 1 && i.IsMultiLine {
		return strings.Join(i.lines, NEW_LINE)
	}
	// exactly one line, or a single-line field that should never hold more
	return i.lines[0]
}
func (i *Input) | (text string) {
i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field. By default lines are separated by \n
func (i *Input) Lines() []string {
return i.lines
}
// Private methods for the input field
// TODO: handle delete key
func (i *Input) backspace() {
curLine := i.lines[i.cursorLineIndex]
// at the beginning of the buffer, nothing to do
if len(curLine) == 0 && i.cursorLineIndex == 0 {
return
}
// at the beginning of a line somewhere in the buffer
if i.cursorLinePos == 0 {
prevLine := i.lines[i.cursorLineIndex-1]
// remove the newline character from the prevline
prevLine = prevLine[:len(curLine)-1] + curLine
i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
return
}
// I'm at the end of a line
if i.cursorLinePos == len(curLine)-1 {
i.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]
i.cursorLinePos--
return
}
// I'm in the middle of a line
i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) {
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
}
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
// moveUp moves the cursor one line up, clamping the column when the
// previous line is shorter; on the first line it jumps to column zero.
func (i *Input) moveUp() {
	if i.cursorLineIndex == 0 {
		// already at the top: just go to the start of the line
		i.cursorLinePos = 0
		return
	}
	target := i.lines[i.cursorLineIndex-1]
	i.cursorLineIndex--
	if len(target) < i.cursorLinePos {
		// target line is shorter: land on its last character
		i.cursorLinePos = len(target) - 1
	}
}
func (i *Input) moveDown() {
// we are already on the last line, we just need to move the position to the end of the line
if i.cursorLineIndex == len(i.lines)-1 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
return
}
// check if the cursor can move to the same position in the next line, otherwise move it to the end
nextLine := i.lines[i.cursorLineIndex+1]
if len(nextLine) >= i.cursorLinePos {
i.cursorLineIndex++
} else {
i.cursorLineIndex++
i.cursorLinePos = len(nextLine) - 1
}
}
func (i *Input) moveLeft() {
// if we are at the beginning of the line move the cursor to the previous line
if i.cursorLinePos == 0 {
origLine := i.cursorLineIndex
i.moveUp()
if origLine > 0 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
}
return
}
i.cursorLinePos--
}
func (i *Input) moveRight() {
// if we are at the end of the line move to the next
if i.cursorLinePos >= len(i.lines[i.cursorLineIndex]) {
origLine := i.cursorLineIndex
i.moveDown()
if origLine < len(i.lines)-1 {
i.cursorLinePos = 0
}
return
}
i.cursorLinePos++
}
// Buffer implements Bufferer interface.
func (i *Input) Buffer() Buffer {
buf := i.Block.Buffer()
// offset used to display the line numbers
textXOffset := 0
bufferLines := i.lines[:]
firstLine := 0
lastLine := i.innerArea.Dy()
if i.IsMultiLine {
if i.cursorLineIndex >= lastLine {
firstLine += i.cursorLineIndex - lastLine + 1
lastLine += i.cursorLineIndex - lastLine + 1
}
if len(i.lines) < lastLine {
bufferLines = i.lines[firstLine:]
} else {
bufferLines = i.lines[firstLine:lastLine]
}
}
if i.ShowLineNo {
// forcing space for up to 1K
if lastLine < LINE_NO_MIN_SPACE {
textXOffset = len(strconv.Itoa(LINE_NO_MIN_SPACE)) + 2
} else {
textXOffset = len(strconv.Itoa(lastLine)) + 2 // one space at the beginning and one at the end
}
}
text := strings.Join(bufferLines, NEW_LINE)
// if the last line is empty then we add a fake space to make sure line numbers are displayed
if len(bufferLines) > 0 && bufferLines[len(bufferLines)-1] == "" && i.ShowLineNo {
text += " "
}
fg, bg := i.TextFgColor, i.TextBgColor
cs := i.TextBuilder.Build(text, fg, bg)
y, x, n := 0, 0, 0
lineNoCnt := 1
for n < len(cs) {
w := cs[n].Width()
if x == 0 && i.ShowLineNo {
curLineNoString := " " + strconv.Itoa(lineNoCnt) +
strings.Join(make([]string, textXOffset-len(strconv.Itoa(lineNoCnt))-1), " ")
//i.debugMessage = "Line no: " + curLineNoString
curLineNoRunes := i.TextBuilder.Build(curLineNoString, fg, bg)
for lineNo := 0; lineNo < len(curLineNoRunes); lineNo++ {
buf.Set(i.innerArea.Min.X+x+lineNo, i.innerArea.Min.Y+y, curLineNoRunes[lineNo])
}
lineNoCnt++
}
if cs[n].Ch == '\n' {
y++
n++
x = 0 // set x = 0
continue
}
buf.Set(i.innerArea.Min.X+x+textXOffset, i.innerArea.Min.Y+y, cs[n])
n++
x += w
}
cursorXOffset := i.X + textXOffset
if i.BorderLeft {
cursorXOffset++
}
cursorYOffset := i.Y// termui.TermHeight() - i.innerArea.Dy()
if i.BorderTop {
cursorYOffset++
}
if lastLine > i.innerArea.Dy() {
cursorYOffset += i.innerArea.Dy() - 1
} else {
cursorYOffset += i.cursorLineIndex
}
if i.IsCapturing {
i.CursorX = i.cursorLinePos+cursorXOffset
i.CursorY = cursorYOffset
termbox.SetCursor(i.cursorLinePos+cursorXOffset, cursorYOffset)
}
/*
if i.DebugMode {
position := fmt.Sprintf("%s li: %d lp: %d n: %d", i.debugMessage, i.cursorLineIndex, i.cursorLinePos, len(i.lines))
for idx, char := range position {
buf.Set(i.innerArea.Min.X+i.innerArea.Dx()-len(position) + idx,
i.innerArea.Min.Y+i.innerArea.Dy()-1,
Cell{Ch: char, Fg: i.TextFgColor, Bg: i.TextBgColor})
}
}
*/
return buf
}
// getCharString maps a /sys/kbd key token (e.g. "<space>") to the string it
// should insert via the SpecialChars table; unknown tokens are returned
// verbatim.
func (i *Input) getCharString(s string) string {
	if val, ok := i.SpecialChars[s]; ok {
		return val
	} else {
		return s
	}
}
// getInputEvt builds the EvtInput payload published on the /input/*/kbd
// stream: the triggering key token plus the current line's text, the cursor
// column, and the current line index.
func (i *Input) getInputEvt(key string) EvtInput {
	return EvtInput{
		KeyStr:         key,
		LineText:       i.lines[i.cursorLineIndex],
		CursorPosition: i.cursorLinePos,
		LineIndex:      i.cursorLineIndex,
	}
}
| SetText | identifier_name |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (ie. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do no return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn | (
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
}
| get_partitions | identifier_name |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (ie. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do no return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
|
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn get_partitions(
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
}
| {
return None;
} | conditional_block |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (ie. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do no return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> |
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn get_partitions(
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
}
| {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
} | identifier_body |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (ie. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do no return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available, |
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn get_partitions(
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
} | });
}
None
} | random_line_split |
tpm.rs | // SPDX-License-Identifier: Apache-2.0
// Copyright 2021 Keylime Authors
#[macro_use]
use log::*;
use std::convert::{TryFrom, TryInto};
use std::io::prelude::*;
use std::str::FromStr;
use crate::{
common::config_get, quotes_handler::KeylimeIdQuote,
Error as KeylimeError, QuoteData, Result,
};
use actix_web::web::Data;
use openssl::{
hash::{Hasher, MessageDigest},
memcmp,
pkey::{Id, PKeyRef, Public},
rsa::Rsa,
};
use flate2::{write::ZlibEncoder, Compression};
use tss_esapi::{
abstraction::{ak, cipher::Cipher, ek, DefaultKey},
attributes::session::SessionAttributesBuilder,
constants::{
session_type::SessionType,
tss::{TPM2_ALG_NULL, TPM2_ST_ATTEST_QUOTE},
},
handles::{AuthHandle, KeyHandle, PcrHandle, SessionHandle},
interface_types::{
algorithm::{AsymmetricAlgorithm, HashingAlgorithm, SignatureScheme},
session_handles::AuthSession,
},
structures::{
Digest, DigestValues, EncryptedSecret, IDObject, Name,
PcrSelectionList, PcrSelectionListBuilder, PcrSlot,
},
tcti_ldr::TctiNameConf,
tss2_esys::{
Tss2_MU_TPM2B_PUBLIC_Marshal, Tss2_MU_TPMS_ATTEST_Unmarshal,
Tss2_MU_TPMT_SIGNATURE_Marshal, TPM2B_ATTEST, TPM2B_PUBLIC,
TPML_DIGEST, TPML_PCR_SELECTION, TPMS_ATTEST, TPMS_SCHEME_HASH,
TPMT_SIGNATURE, TPMT_SIG_SCHEME, TPMU_SIG_SCHEME,
},
utils::{PcrData, Signature},
Context,
};
/*
* Input: None
* Return: Connection context
*
* Example call:
* let mut ctx = tpm::get_tpm2_ctx();
*/
pub(crate) fn get_tpm2_ctx() -> Result<Context> {
let tcti_path = match std::env::var("TCTI") {
Ok(val) => val,
Err(_) => if std::path::Path::new("/dev/tpmrm0").exists() {
"device:/dev/tpmrm0"
} else {
"device:/dev/tpm0"
}
.to_string(),
};
let tcti = TctiNameConf::from_str(&tcti_path)?;
Context::new(tcti).map_err(|e| e.into())
}
/*
* Input: Connection context, asymmetric algo (optional)
* Return: (Key handle, public cert, TPM public object)
* Example call:
* let (key, cert, tpm_pub) = tpm::create_ek(context, Some(AsymmetricAlgorithm::Rsa))
*/
pub(crate) fn create_ek(
context: &mut Context,
alg: Option<AsymmetricAlgorithm>,
) -> Result<(KeyHandle, Option<Vec<u8>>, Vec<u8>)> {
// Set encryption algorithm
let alg = match alg {
Some(a) => a,
None => {
match config_get(
"cloud_agent",
"tpm_encryption_alg",
)?
.as_str()
{
"rsa" => AsymmetricAlgorithm::Rsa,
"ecc" => AsymmetricAlgorithm::Ecc,
_ => return Err(KeylimeError::Configuration(String::from("Encryption algorithm provided in keylime.conf is not supported")))
}
}
};
// Retrieve EK handle, EK pub cert, and TPM pub object
let handle = ek::create_ek_object(context, alg, DefaultKey)?;
let cert = match ek::retrieve_ek_pubcert(context, alg) {
Ok(v) => Some(v),
Err(_) => {
warn!("No EK certificate found in TPM NVRAM");
None
}
};
let (tpm_pub, _, _) = context.read_public(handle)?;
let tpm_pub_vec = pub_to_vec(tpm_pub);
Ok((handle, cert, tpm_pub_vec))
}
fn unmarshal_tpms_attest(val: &[u8]) -> Result<TPMS_ATTEST> {
let mut resp = TPMS_ATTEST::default();
let mut offset = 0u64;
unsafe {
let res = Tss2_MU_TPMS_ATTEST_Unmarshal(
val[..].as_ptr(),
val.len() as u64,
&mut offset,
&mut resp,
);
if res != 0 {
panic!("Error converting"); //#[allow_ci]
}
}
Ok(resp)
}
// Multiple TPM objects need to be marshaled for Quoting. This macro will
// create the appropriate functions when called below. The macro is intended
// to help with any future similar marshaling functions.
macro_rules! create_marshal_fn {
($func:ident, $tpmobj:ty, $marshal:ident) => {
fn $func(t: $tpmobj) -> Vec<u8> {
let mut offset = 0u64;
let size = std::mem::size_of::<$tpmobj>();
let mut tpm_vec = Vec::with_capacity(size);
unsafe {
let res = $marshal(
&t,
tpm_vec.as_mut_ptr(),
tpm_vec.capacity() as u64,
&mut offset,
);
if res != 0 {
panic!("out of memory or invalid data from TPM"); //#[allow_ci]
}
// offset is a buffer, so after marshaling function is called it holds the
// number of bytes written to the vector
tpm_vec.set_len(offset as usize);
}
tpm_vec
}
};
}
// These marshaling functions use the macro above and are based on this format:
// https://github.com/fedora-iot/clevis-pin-tpm2/blob/main/src/tpm_objects.rs#L64
// ... and on these marshaling functions:
// https://github.com/parallaxsecond/rust-tss-esapi/blob/main/tss-esapi-sys/src/ \
// bindings/x86_64-unknown-linux-gnu.rs#L16010
//
// Functions can be created using the following form:
// create_marshal_fn!(name_of_function_to_create, struct_to_be_marshaled, marshaling_function);
//
create_marshal_fn!(pub_to_vec, TPM2B_PUBLIC, Tss2_MU_TPM2B_PUBLIC_Marshal);
create_marshal_fn!(
sig_to_vec,
TPMT_SIGNATURE,
Tss2_MU_TPMT_SIGNATURE_Marshal
);
// Recreate how tpm2-tools creates the PCR out file. Roughly, this is a
// TPML_PCR_SELECTION + number of TPML_DIGESTS + TPML_DIGESTs.
// Reference:
// https://github.com/tpm2-software/tpm2-tools/blob/master/tools/tpm2_quote.c#L47-L91
//
// Note: tpm2-tools does not use its own documented marshaling functions for this output,
// so the below code recreates the idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
selection_list: PcrSelectionList,
pcrdata: PcrData,
) -> Vec<u8> {
let pcrsel: TPML_PCR_SELECTION = selection_list.into();
let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
let digest: TPML_DIGEST = pcrdata.into();
let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
let mut data_vec =
Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
data_vec.extend(&pcrsel_vec);
data_vec.extend(&1u32.to_le_bytes());
data_vec.extend(&digest_vec);
data_vec
}
/* Converts a hex value in the form of a string (ex. from keylime.conf's
* ek_handle) to a key handle.
*
* Input: &str
* Return: Key handle
*
* Example call:
* let ek_handle = tpm::ek_from_hex_str("0x81000000");
*/
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
let val = val.trim_start_matches("0x");
Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
*
* Input: Connection context, parent key's KeyHandle.
* Return: (Key handle, key name, TPM public object as a vector)
* Example call:
* let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
const TSS_MAGIC: u32 = 3135029470;
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
pub(crate) fn activate_credential(
ctx: &mut Context,
keyblob: Vec<u8>,
ak: KeyHandle,
ek: KeyHandle,
) -> Result<Digest> {
let (credential, secret) = parse_cred_and_secret(keyblob)?;
let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
// We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
let _ = ctx.execute_with_nullauth_session(|context| {
context.policy_secret(
ek_auth.try_into()?,
AuthHandle::Endorsement,
Default::default(),
Default::default(),
Default::default(),
None,
)
})?;
let resp = ctx
.execute_with_sessions(
(Some(AuthSession::Password), Some(ek_auth), None),
|context| context.activate_credential(ak, ek, credential, secret),
)
.map_err(KeylimeError::from);
ctx.flush_context(ek.into())?;
resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
match alg.as_str() {
"sha256" => Ok(HashingAlgorithm::Sha256),
"sha1" => Ok(HashingAlgorithm::Sha1),
other => {
Err(KeylimeError::Other(format!("{:?} not implemented", alg)))
}
}
}
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
AlgNull,
}
impl Default for TpmSigScheme {
fn default() -> Self {
TpmSigScheme::AlgNull
}
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
match scheme {
// The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
// in the details field.
TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
scheme: TPM2_ALG_NULL,
details: TPMU_SIG_SCHEME {
any: TPMS_SCHEME_HASH {
hashAlg: TPM2_ALG_NULL,
},
},
}),
_ => Err(KeylimeError::Other(format!(
"The signature scheme {:?} is not implemented",
scheme
))),
}
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> {
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
}
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000, which translates to 0x408000.
//
// The mask is a string because it is sent as a string from the tenant
// and verifier. The output from this function can be used to call a
// Quote from the TSS ESAPI.
//
pub(crate) fn read_mask(mask: &str) -> Result<Vec<PcrSlot>> {
let mut pcrs = Vec::new();
let num = u32::from_str_radix(mask.trim_start_matches("0x"), 16)?;
// check which bits are set
for i in 0..32 {
if num & (1 << i) != 0 {
pcrs.push(
match i {
0 => PcrSlot::Slot0,
1 => PcrSlot::Slot1,
2 => PcrSlot::Slot2,
3 => PcrSlot::Slot3,
4 => PcrSlot::Slot4,
5 => PcrSlot::Slot5,
6 => PcrSlot::Slot6,
7 => PcrSlot::Slot7,
8 => PcrSlot::Slot8,
9 => PcrSlot::Slot9,
10 => PcrSlot::Slot10,
11 => PcrSlot::Slot11,
12 => PcrSlot::Slot12,
13 => PcrSlot::Slot13,
14 => PcrSlot::Slot14,
15 => PcrSlot::Slot15,
16 => PcrSlot::Slot16,
17 => PcrSlot::Slot17,
18 => PcrSlot::Slot18,
19 => PcrSlot::Slot19, | 23 => PcrSlot::Slot23,
bit => return Err(KeylimeError::Other(format!("malformed mask in integrity quote: only pcrs 0-23 can be included, but mask included pcr {:?}", bit))),
},
)
}
}
Ok(pcrs)
}
// This encodes a quote string as input to Python Keylime's quote checking functionality.
// The quote, signature, and pcr blob are concatenated with ':' separators. To match the
// expected format, the quote, signature, and pcr blob must be individually compressed
// with zlib at the default compression level and then base64 encoded before concatenation.
//
// Reference:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6 \
// /keylime/tpm/tpm_main.py#L964-L975
pub(crate) fn encode_quote_string(
att: TPM2B_ATTEST,
sig: Signature,
pcrs_read: PcrSelectionList,
pcr_data: PcrData,
) -> Result<String> {
// marshal structs to vec in expected formats. these formats are
// dictated by tpm2_tools.
let att_vec = &att.attestationData[0..att.size as usize];
let sig_vec = sig_to_vec(sig.try_into()?);
let pcr_vec = pcrdata_to_vec(pcrs_read, pcr_data);
// zlib compression
let mut att_comp = ZlibEncoder::new(Vec::new(), Compression::default());
att_comp.write_all(att_vec);
let att_comp_finished = att_comp.finish()?;
let mut sig_comp = ZlibEncoder::new(Vec::new(), Compression::default());
sig_comp.write_all(&sig_vec);
let sig_comp_finished = sig_comp.finish()?;
let mut pcr_comp = ZlibEncoder::new(Vec::new(), Compression::default());
pcr_comp.write_all(&pcr_vec);
let pcr_comp_finished = pcr_comp.finish()?;
// base64 encoding
let att_str = base64::encode(att_comp_finished);
let sig_str = base64::encode(sig_comp_finished);
let pcr_str = base64::encode(pcr_comp_finished);
// create concatenated string
let mut quote = String::new();
quote.push_str(&att_str);
quote.push(':');
quote.push_str(&sig_str);
quote.push(':');
quote.push_str(&pcr_str);
Ok(quote)
}
// This function extends Pcr16 with the digest, then creates a PcrList
// from the given mask and pcr16.
// Note: Currently, this will build the list for both SHA256 and SHA1 as
// necessary for the Python components of Keylime.
pub(crate) fn build_pcr_list(
context: &mut Context,
digest: DigestValues,
mask: Option<&str>,
) -> Result<PcrSelectionList> {
// extend digest into pcr16
context.execute_with_nullauth_session(|ctx| {
ctx.pcr_reset(PcrHandle::Pcr16)?;
ctx.pcr_extend(PcrHandle::Pcr16, digest.to_owned())
})?;
// translate mask to vec of pcrs
let mut pcrs = match mask {
Some(m) => read_mask(m)?,
None => Vec::new(),
};
// add pcr16 if it isn't in the vec already
if !pcrs.iter().any(|&pcr| pcr == PcrSlot::Slot16) {
let mut slot16 = vec![PcrSlot::Slot16];
pcrs.append(&mut slot16);
}
let ima_pcr_index = pcrs.iter().position(|&pcr| pcr == PcrSlot::Slot10);
let mut pcrlist = PcrSelectionListBuilder::new();
// remove IMA pcr before selecting for sha256 bank
if let Some(ima_pcr_index) = ima_pcr_index {
let _ = pcrs.remove(ima_pcr_index);
// add only IMA pcr for sha1 bank
let mut sha1_pcrs = vec![PcrSlot::Slot10];
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha1, &sha1_pcrs);
}
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha256, &pcrs);
let pcrlist = pcrlist.build();
Ok(pcrlist)
}
// The pcr blob corresponds to the pcr out file that records the list of PCR values,
// specified by tpm2tools, ex. 'tpm2_quote ... -o <pcrfilename>'. Read more here:
// https://github.com/tpm2-software/tpm2-tools/blob/master/man/tpm2_quote.1.md
//
// It is required by Python Keylime's check_quote functionality. For how the quote is
// checked, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L990
//
// For how the quote is created, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L965
//
pub(crate) fn make_pcr_blob(
context: &mut Context,
pcrlist: PcrSelectionList,
) -> Result<(PcrSelectionList, PcrData)> {
let (_, pcrs_read, pcr_data) =
context.execute_without_session(|ctx| ctx.pcr_read(&pcrlist))?;
if pcrs_read != pcrlist {
return Err(KeylimeError::Other(format!(
"could not read all pcrs; requested: {:?}, read: {:?}",
pcrlist, pcrs_read
)));
}
Ok((pcrs_read, pcr_data))
}
// Takes a TSS ESAPI HashingAlgorithm and returns the corresponding OpenSSL
// MessageDigest.
fn hash_alg_to_message_digest(
hash_alg: HashingAlgorithm,
) -> Result<MessageDigest> {
match hash_alg {
HashingAlgorithm::Sha256 => Ok(MessageDigest::sha256()),
HashingAlgorithm::Sha1 => Ok(MessageDigest::sha1()),
other => Err(KeylimeError::Other(format!(
"Unsupported hashing algorithm: {:?}",
other
))),
}
}
fn check_if_pcr_data_and_attestation_match(
hash_algo: HashingAlgorithm,
pcr_data: &PcrData,
attestation: &TPM2B_ATTEST,
) -> Result<bool> {
let pcr_data = TPML_DIGEST::try_from(pcr_data.clone())?;
let attestation = unmarshal_tpms_attest(&attestation.attestationData)?;
if attestation.type_ != TPM2_ST_ATTEST_QUOTE {
return Err(KeylimeError::Other(format!(
"Expected attestation type TPM2_ST_ATTEST_QUOTE, got {:?}",
attestation.type_
)));
}
let quote = unsafe { attestation.attested.quote };
let attested_pcr = quote.pcrDigest;
let attested_pcr = &attested_pcr.buffer[..attested_pcr.size as usize];
let mut hasher = Hasher::new(hash_alg_to_message_digest(hash_algo)?)?;
for i in 0..pcr_data.count {
let pcr = pcr_data.digests[i as usize];
hasher.update(&pcr.buffer[..pcr.size as usize])?;
}
let pcr_digest = hasher.finish()?;
log::trace!(
"Attested to PCR digest: {:?}, read PCR digest: {:?}",
attested_pcr,
pcr_digest,
);
Ok(memcmp::eq(attested_pcr, &pcr_digest))
}
const NUM_ATTESTATION_ATTEMPTS: i32 = 5;
fn perform_quote_and_pcr_read(
mut context: &mut Context,
ak_handle: KeyHandle,
nonce: &[u8],
pcrlist: PcrSelectionList,
) -> Result<(TPM2B_ATTEST, Signature, PcrSelectionList, PcrData)> {
let sig_scheme = get_sig_scheme(TpmSigScheme::default())?;
let nonce = nonce.try_into()?;
for attempt in 0..NUM_ATTESTATION_ATTEMPTS {
// TSS ESAPI quote does not create pcr blob, so create it separately
let (pcrs_read, pcr_data) =
make_pcr_blob(&mut context, pcrlist.clone())?;
// create quote
let (attestation, sig) = context.quote(
ak_handle,
&nonce,
sig_scheme,
pcrs_read.clone(),
)?;
// Check whether the attestation and pcr_data match
if (check_if_pcr_data_and_attestation_match(
HashingAlgorithm::Sha256,
&pcr_data,
&attestation,
)?) {
return Ok((attestation, sig, pcrs_read, pcr_data));
}
log::info!(
"PCR data and attestation data mismatched on attempt {}",
attempt
);
}
log::error!("PCR data and attestation data mismatched on all {} attempts, giving up", NUM_ATTESTATION_ATTEMPTS);
Err(KeylimeError::Other(
"Consistent race condition: can't make attestation match pcr_data"
.to_string(),
))
}
// Despite the return type, this function is used for both Identity and
// Integrity Quotes. The Quote handler will add additional information to
// turn an Identity Quote into an Integrity Quote.
pub(crate) fn quote(
nonce: &[u8],
mask: Option<&str>,
data: Data<QuoteData>,
) -> Result<KeylimeIdQuote> {
let hash_alg = get_hash_alg(config_get("cloud_agent", "tpm_hash_alg")?)?;
let nk_digest = pubkey_to_tpm_digest(&data.pub_key)?;
// must unwrap here due to lock mechanism
// https://github.com/rust-lang-nursery/failure/issues/192
let mut context = data.tpmcontext.lock().unwrap(); //#[allow_ci]
let pcrlist = build_pcr_list(&mut context, nk_digest, mask)?;
let (attestation, sig, pcrs_read, pcr_data) = context
.execute_with_nullauth_session(|mut ctx| {
perform_quote_and_pcr_read(
&mut ctx,
data.ak_handle,
nonce,
pcrlist,
)
})?;
let quote = encode_quote_string(attestation, sig, pcrs_read, pcr_data)?;
let mut keylimequote = KeylimeIdQuote::default();
keylimequote.quote.push_str("e);
Ok(keylimequote)
}
#[ignore] // This will only work as an integration test because it needs keylime.conf
#[test]
fn pubkey_to_digest() {
let (key, _) = crate::crypto::rsa_generate_pair(2048).unwrap(); //#[allow_ci]
let digest = pubkey_to_tpm_digest(&key).unwrap(); //#[allow_ci]
}
#[test]
fn ek_from_hex() {
assert_eq!(
ek_from_hex_str("0x81000000").unwrap(), //#[allow_ci]
ek_from_hex_str("81000000").unwrap() //#[allow_ci]
);
assert_eq!(
ek_from_hex_str("0xdeadbeef").unwrap(), //#[allow_ci]
ek_from_hex_str("deadbeef").unwrap() //#[allow_ci]
);
assert!(ek_from_hex_str("a").is_ok());
assert!(ek_from_hex_str("18bb9").is_ok());
assert!(ek_from_hex_str("qqq").is_err());
assert!(ek_from_hex_str("0xqqq").is_err());
assert!(ek_from_hex_str("0xdeadbeefqwerty").is_err());
assert!(ek_from_hex_str("0x0x0x").is_err());
}
#[test]
fn mask() {
assert_eq!(read_mask("0x0").unwrap(), vec![]); //#[allow_ci]
assert_eq!(read_mask("0x1").unwrap(), vec![PcrSlot::Slot0]); //#[allow_ci]
assert_eq!(read_mask("0x2").unwrap(), vec![PcrSlot::Slot1]); //#[allow_ci]
assert_eq!(read_mask("0x4").unwrap(), vec![PcrSlot::Slot2]); //#[allow_ci]
assert_eq!(
read_mask("0x5").unwrap(), //#[allow_ci]
vec![PcrSlot::Slot0, PcrSlot::Slot2]
);
assert_eq!(
read_mask("0x6").unwrap(), //#[allow_ci]
vec![PcrSlot::Slot1, PcrSlot::Slot2]
);
assert_eq!(read_mask("0x800000").unwrap(), vec![PcrSlot::Slot23]); //#[allow_ci]
assert_eq!(
read_mask("0xffffff").unwrap(), //#[allow_ci]
vec![
PcrSlot::Slot0,
PcrSlot::Slot1,
PcrSlot::Slot2,
PcrSlot::Slot3,
PcrSlot::Slot4,
PcrSlot::Slot5,
PcrSlot::Slot6,
PcrSlot::Slot7,
PcrSlot::Slot8,
PcrSlot::Slot9,
PcrSlot::Slot10,
PcrSlot::Slot11,
PcrSlot::Slot12,
PcrSlot::Slot13,
PcrSlot::Slot14,
PcrSlot::Slot15,
PcrSlot::Slot16,
PcrSlot::Slot17,
PcrSlot::Slot18,
PcrSlot::Slot19,
PcrSlot::Slot20,
PcrSlot::Slot21,
PcrSlot::Slot22,
PcrSlot::Slot23
]
);
assert!(read_mask("0x1ffffff").is_err());
} | 20 => PcrSlot::Slot20,
21 => PcrSlot::Slot21,
22 => PcrSlot::Slot22, | random_line_split |
tpm.rs | // SPDX-License-Identifier: Apache-2.0
// Copyright 2021 Keylime Authors
#[macro_use]
use log::*;
use std::convert::{TryFrom, TryInto};
use std::io::prelude::*;
use std::str::FromStr;
use crate::{
common::config_get, quotes_handler::KeylimeIdQuote,
Error as KeylimeError, QuoteData, Result,
};
use actix_web::web::Data;
use openssl::{
hash::{Hasher, MessageDigest},
memcmp,
pkey::{Id, PKeyRef, Public},
rsa::Rsa,
};
use flate2::{write::ZlibEncoder, Compression};
use tss_esapi::{
abstraction::{ak, cipher::Cipher, ek, DefaultKey},
attributes::session::SessionAttributesBuilder,
constants::{
session_type::SessionType,
tss::{TPM2_ALG_NULL, TPM2_ST_ATTEST_QUOTE},
},
handles::{AuthHandle, KeyHandle, PcrHandle, SessionHandle},
interface_types::{
algorithm::{AsymmetricAlgorithm, HashingAlgorithm, SignatureScheme},
session_handles::AuthSession,
},
structures::{
Digest, DigestValues, EncryptedSecret, IDObject, Name,
PcrSelectionList, PcrSelectionListBuilder, PcrSlot,
},
tcti_ldr::TctiNameConf,
tss2_esys::{
Tss2_MU_TPM2B_PUBLIC_Marshal, Tss2_MU_TPMS_ATTEST_Unmarshal,
Tss2_MU_TPMT_SIGNATURE_Marshal, TPM2B_ATTEST, TPM2B_PUBLIC,
TPML_DIGEST, TPML_PCR_SELECTION, TPMS_ATTEST, TPMS_SCHEME_HASH,
TPMT_SIGNATURE, TPMT_SIG_SCHEME, TPMU_SIG_SCHEME,
},
utils::{PcrData, Signature},
Context,
};
/*
* Input: None
* Return: Connection context
*
* Example call:
* let mut ctx = tpm::get_tpm2_ctx();
*/
pub(crate) fn get_tpm2_ctx() -> Result<Context> {
let tcti_path = match std::env::var("TCTI") {
Ok(val) => val,
Err(_) => if std::path::Path::new("/dev/tpmrm0").exists() {
"device:/dev/tpmrm0"
} else {
"device:/dev/tpm0"
}
.to_string(),
};
let tcti = TctiNameConf::from_str(&tcti_path)?;
Context::new(tcti).map_err(|e| e.into())
}
/*
* Input: Connection context, asymmetric algo (optional)
* Return: (Key handle, public cert, TPM public object)
* Example call:
* let (key, cert, tpm_pub) = tpm::create_ek(context, Some(AsymmetricAlgorithm::Rsa))
*/
pub(crate) fn create_ek(
context: &mut Context,
alg: Option<AsymmetricAlgorithm>,
) -> Result<(KeyHandle, Option<Vec<u8>>, Vec<u8>)> {
// Set encryption algorithm
let alg = match alg {
Some(a) => a,
None => {
match config_get(
"cloud_agent",
"tpm_encryption_alg",
)?
.as_str()
{
"rsa" => AsymmetricAlgorithm::Rsa,
"ecc" => AsymmetricAlgorithm::Ecc,
_ => return Err(KeylimeError::Configuration(String::from("Encryption algorithm provided in keylime.conf is not supported")))
}
}
};
// Retrieve EK handle, EK pub cert, and TPM pub object
let handle = ek::create_ek_object(context, alg, DefaultKey)?;
let cert = match ek::retrieve_ek_pubcert(context, alg) {
Ok(v) => Some(v),
Err(_) => {
warn!("No EK certificate found in TPM NVRAM");
None
}
};
let (tpm_pub, _, _) = context.read_public(handle)?;
let tpm_pub_vec = pub_to_vec(tpm_pub);
Ok((handle, cert, tpm_pub_vec))
}
fn unmarshal_tpms_attest(val: &[u8]) -> Result<TPMS_ATTEST> {
let mut resp = TPMS_ATTEST::default();
let mut offset = 0u64;
unsafe {
let res = Tss2_MU_TPMS_ATTEST_Unmarshal(
val[..].as_ptr(),
val.len() as u64,
&mut offset,
&mut resp,
);
if res != 0 {
panic!("Error converting"); //#[allow_ci]
}
}
Ok(resp)
}
// Multiple TPM objects need to be marshaled for Quoting. This macro will
// create the appropriate functions when called below. The macro is intended
// to help with any future similar marshaling functions.
macro_rules! create_marshal_fn {
($func:ident, $tpmobj:ty, $marshal:ident) => {
fn $func(t: $tpmobj) -> Vec<u8> {
let mut offset = 0u64;
let size = std::mem::size_of::<$tpmobj>();
let mut tpm_vec = Vec::with_capacity(size);
unsafe {
let res = $marshal(
&t,
tpm_vec.as_mut_ptr(),
tpm_vec.capacity() as u64,
&mut offset,
);
if res != 0 {
panic!("out of memory or invalid data from TPM"); //#[allow_ci]
}
// offset is a buffer, so after marshaling function is called it holds the
// number of bytes written to the vector
tpm_vec.set_len(offset as usize);
}
tpm_vec
}
};
}
// These marshaling functions use the macro above and are based on this format:
// https://github.com/fedora-iot/clevis-pin-tpm2/blob/main/src/tpm_objects.rs#L64
// ... and on these marshaling functions:
// https://github.com/parallaxsecond/rust-tss-esapi/blob/main/tss-esapi-sys/src/ \
// bindings/x86_64-unknown-linux-gnu.rs#L16010
//
// Functions can be created using the following form:
// create_marshal_fn!(name_of_function_to_create, struct_to_be_marshaled, marshaling_function);
//
create_marshal_fn!(pub_to_vec, TPM2B_PUBLIC, Tss2_MU_TPM2B_PUBLIC_Marshal);
create_marshal_fn!(
sig_to_vec,
TPMT_SIGNATURE,
Tss2_MU_TPMT_SIGNATURE_Marshal
);
// Recreate how tpm2-tools creates the PCR out file. Roughly, this is a
// TPML_PCR_SELECTION + number of TPML_DIGESTS + TPML_DIGESTs.
// Reference:
// https://github.com/tpm2-software/tpm2-tools/blob/master/tools/tpm2_quote.c#L47-L91
//
// Note: tpm2-tools does not use its own documented marshaling functions for this output,
// so the below code recreates the idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
selection_list: PcrSelectionList,
pcrdata: PcrData,
) -> Vec<u8> {
let pcrsel: TPML_PCR_SELECTION = selection_list.into();
let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
let digest: TPML_DIGEST = pcrdata.into();
let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
let mut data_vec =
Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
data_vec.extend(&pcrsel_vec);
data_vec.extend(&1u32.to_le_bytes());
data_vec.extend(&digest_vec);
data_vec
}
/* Converts a hex value in the form of a string (ex. from keylime.conf's
* ek_handle) to a key handle.
*
* Input: &str
* Return: Key handle
*
* Example call:
* let ek_handle = tpm::ek_from_hex_str("0x81000000");
*/
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
let val = val.trim_start_matches("0x");
Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
*
* Input: Connection context, parent key's KeyHandle.
* Return: (Key handle, key name, TPM public object as a vector)
* Example call:
* let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
const TSS_MAGIC: u32 = 3135029470;
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
// Activates a credential blob (as produced by tpm2_makecredential) against
// the given AK and EK, returning the recovered challenge digest.
// The EK handle is flushed before returning, regardless of the outcome.
pub(crate) fn activate_credential(
    ctx: &mut Context,
    keyblob: Vec<u8>,
    ak: KeyHandle,
    ek: KeyHandle,
) -> Result<Digest> {
    let (credential, secret) = parse_cred_and_secret(keyblob)?;
    // Policy session used to authorize use of the EK.
    let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
    // We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
    let _ = ctx.execute_with_nullauth_session(|context| {
        context.policy_secret(
            ek_auth.try_into()?,
            AuthHandle::Endorsement,
            Default::default(),
            Default::default(),
            Default::default(),
            None,
        )
    })?;
    // activate_credential runs with (password session for the AK,
    // policy session for the EK).
    let resp = ctx
        .execute_with_sessions(
            (Some(AuthSession::Password), Some(ek_auth), None),
            |context| context.activate_credential(ak, ek, credential, secret),
        )
        .map_err(KeylimeError::from);
    // Flush the EK context even if activation failed.
    ctx.flush_context(ek.into())?;
    resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file. Only "sha256" and "sha1" are
// supported; anything else is an error.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
    match alg.as_str() {
        "sha256" => Ok(HashingAlgorithm::Sha256),
        "sha1" => Ok(HashingAlgorithm::Sha1),
        // Use the matched binding (previously bound but unused, which
        // triggered an unused-variable warning).
        other => {
            Err(KeylimeError::Other(format!("{:?} not implemented", other)))
        }
    }
}
// Signature schemes selectable for quoting. Only TPM2_ALG_NULL is
// implemented; see get_sig_scheme for the TSS struct mapping.
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
    AlgNull,
}
impl Default for TpmSigScheme {
    // The null scheme is the default used by perform_quote_and_pcr_read.
    fn default() -> Self {
        TpmSigScheme::AlgNull
    }
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
    scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
    // The match is exhaustive over TpmSigScheme, so the former catch-all
    // error arm was unreachable and has been removed; adding a new enum
    // variant will now force this match to be updated at compile time.
    match scheme {
        // The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
        // in the details field.
        TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
            scheme: TPM2_ALG_NULL,
            details: TPMU_SIG_SCHEME {
                any: TPMS_SCHEME_HASH {
                    hashAlg: TPM2_ALG_NULL,
                },
            },
        }),
    }
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> |
// Translates a hex bit-mask string (ex. "0x408000") into the list of PCR
// slots whose bits are set. The LSB corresponds to PCR0; for example,
// keylime.conf's PCRs 15 and 22 become 0b010000001000000000000000, i.e.
// 0x408000.
//
// The mask arrives as a string from the tenant and cloud verifier; the
// resulting slot list can be used to request a Quote via the TSS ESAPI.
pub(crate) fn read_mask(mask: &str) -> Result<Vec<PcrSlot>> {
    let bits = u32::from_str_radix(mask.trim_start_matches("0x"), 16)?;
    let mut selected = Vec::new();
    // Walk every bit position; set bits outside 0-23 are rejected.
    for bit in 0..32 {
        if bits & (1 << bit) == 0 {
            continue;
        }
        let slot = match bit {
            0 => PcrSlot::Slot0,
            1 => PcrSlot::Slot1,
            2 => PcrSlot::Slot2,
            3 => PcrSlot::Slot3,
            4 => PcrSlot::Slot4,
            5 => PcrSlot::Slot5,
            6 => PcrSlot::Slot6,
            7 => PcrSlot::Slot7,
            8 => PcrSlot::Slot8,
            9 => PcrSlot::Slot9,
            10 => PcrSlot::Slot10,
            11 => PcrSlot::Slot11,
            12 => PcrSlot::Slot12,
            13 => PcrSlot::Slot13,
            14 => PcrSlot::Slot14,
            15 => PcrSlot::Slot15,
            16 => PcrSlot::Slot16,
            17 => PcrSlot::Slot17,
            18 => PcrSlot::Slot18,
            19 => PcrSlot::Slot19,
            20 => PcrSlot::Slot20,
            21 => PcrSlot::Slot21,
            22 => PcrSlot::Slot22,
            23 => PcrSlot::Slot23,
            bit => return Err(KeylimeError::Other(format!("malformed mask in integrity quote: only pcrs 0-23 can be included, but mask included pcr {:?}", bit))),
        };
        selected.push(slot);
    }
    Ok(selected)
}
// This encodes a quote string as input to Python Keylime's quote checking functionality.
// The quote, signature, and pcr blob are concatenated with ':' separators. To match the
// expected format, the quote, signature, and pcr blob must be individually compressed
// with zlib at the default compression level and then base64 encoded before concatenation.
//
// Reference:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6 \
// /keylime/tpm/tpm_main.py#L964-L975
pub(crate) fn encode_quote_string(
    att: TPM2B_ATTEST,
    sig: Signature,
    pcrs_read: PcrSelectionList,
    pcr_data: PcrData,
) -> Result<String> {
    // marshal structs to vec in expected formats. these formats are
    // dictated by tpm2_tools.
    let att_vec = &att.attestationData[0..att.size as usize];
    let sig_vec = sig_to_vec(sig.try_into()?);
    let pcr_vec = pcrdata_to_vec(pcrs_read, pcr_data);
    // zlib compression. write_all errors were previously ignored and are
    // now propagated to the caller.
    let mut att_comp = ZlibEncoder::new(Vec::new(), Compression::default());
    att_comp.write_all(att_vec)?;
    let att_comp_finished = att_comp.finish()?;
    let mut sig_comp = ZlibEncoder::new(Vec::new(), Compression::default());
    sig_comp.write_all(&sig_vec)?;
    let sig_comp_finished = sig_comp.finish()?;
    let mut pcr_comp = ZlibEncoder::new(Vec::new(), Compression::default());
    pcr_comp.write_all(&pcr_vec)?;
    let pcr_comp_finished = pcr_comp.finish()?;
    // base64 encoding
    let att_str = base64::encode(att_comp_finished);
    let sig_str = base64::encode(sig_comp_finished);
    let pcr_str = base64::encode(pcr_comp_finished);
    // join the three encoded parts with ':' separators
    let quote = format!("{}:{}:{}", att_str, sig_str, pcr_str);
    Ok(quote)
}
// This function extends Pcr16 with the digest, then creates a PcrList
// from the given mask and pcr16.
// Note: Currently, this will build the list for both SHA256 and SHA1 as
// necessary for the Python components of Keylime.
pub(crate) fn build_pcr_list(
    context: &mut Context,
    digest: DigestValues,
    mask: Option<&str>,
) -> Result<PcrSelectionList> {
    // extend digest into pcr16
    context.execute_with_nullauth_session(|ctx| {
        ctx.pcr_reset(PcrHandle::Pcr16)?;
        ctx.pcr_extend(PcrHandle::Pcr16, digest.to_owned())
    })?;
    // translate mask to vec of pcrs
    let mut pcrs = match mask {
        Some(m) => read_mask(m)?,
        None => Vec::new(),
    };
    // add pcr16 if it isn't in the vec already
    if !pcrs.iter().any(|&pcr| pcr == PcrSlot::Slot16) {
        pcrs.push(PcrSlot::Slot16);
    }
    let ima_pcr_index = pcrs.iter().position(|&pcr| pcr == PcrSlot::Slot10);
    let mut pcrlist = PcrSelectionListBuilder::new();
    // remove IMA pcr (pcr10) before selecting for the sha256 bank; it is
    // selected only in the sha1 bank, as the Python side expects.
    if let Some(ima_pcr_index) = ima_pcr_index {
        let _ = pcrs.remove(ima_pcr_index);
        let sha1_pcrs = vec![PcrSlot::Slot10];
        pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha1, &sha1_pcrs);
    }
    pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha256, &pcrs);
    Ok(pcrlist.build())
}
// The pcr blob corresponds to the pcr out file that records the list of PCR
// values, as produced by tpm2tools (ex. 'tpm2_quote ... -o <pcrfilename>'):
// https://github.com/tpm2-software/tpm2-tools/blob/master/man/tpm2_quote.1.md
//
// Python Keylime's check_quote functionality requires it; see
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L990
// and, for quote creation,
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L965
//
// Errors unless the TPM returned every requested PCR.
pub(crate) fn make_pcr_blob(
    context: &mut Context,
    pcrlist: PcrSelectionList,
) -> Result<(PcrSelectionList, PcrData)> {
    let (_, pcrs_read, pcr_data) =
        context.execute_without_session(|ctx| ctx.pcr_read(&pcrlist))?;
    if pcrs_read == pcrlist {
        return Ok((pcrs_read, pcr_data));
    }
    Err(KeylimeError::Other(format!(
        "could not read all pcrs; requested: {:?}, read: {:?}",
        pcrlist, pcrs_read
    )))
}
// Takes a TSS ESAPI HashingAlgorithm and returns the corresponding OpenSSL
// MessageDigest. Only SHA-256 and SHA-1 are supported.
fn hash_alg_to_message_digest(
    hash_alg: HashingAlgorithm,
) -> Result<MessageDigest> {
    if let HashingAlgorithm::Sha256 = hash_alg {
        return Ok(MessageDigest::sha256());
    }
    if let HashingAlgorithm::Sha1 = hash_alg {
        return Ok(MessageDigest::sha1());
    }
    Err(KeylimeError::Other(format!(
        "Unsupported hashing algorithm: {:?}",
        hash_alg
    )))
}
// Checks whether the PCR digest embedded in the quote attestation matches
// the digest computed over the separately read PCR values, using the given
// hash algorithm. Comparison is done with openssl memcmp::eq.
fn check_if_pcr_data_and_attestation_match(
    hash_algo: HashingAlgorithm,
    pcr_data: &PcrData,
    attestation: &TPM2B_ATTEST,
) -> Result<bool> {
    let pcr_data = TPML_DIGEST::try_from(pcr_data.clone())?;
    let attestation = unmarshal_tpms_attest(&attestation.attestationData)?;
    if attestation.type_ != TPM2_ST_ATTEST_QUOTE {
        return Err(KeylimeError::Other(format!(
            "Expected attestation type TPM2_ST_ATTEST_QUOTE, got {:?}",
            attestation.type_
        )));
    }
    // attested is a C union; reading the quote member is only valid after
    // the type check above.
    let quote = unsafe { attestation.attested.quote };
    let attested_pcr = quote.pcrDigest;
    let attested_pcr = &attested_pcr.buffer[..attested_pcr.size as usize];
    // Hash the concatenation of all read PCR values.
    let mut hasher = Hasher::new(hash_alg_to_message_digest(hash_algo)?)?;
    for i in 0..pcr_data.count {
        let pcr = pcr_data.digests[i as usize];
        hasher.update(&pcr.buffer[..pcr.size as usize])?;
    }
    let pcr_digest = hasher.finish()?;
    log::trace!(
        "Attested to PCR digest: {:?}, read PCR digest: {:?}",
        attested_pcr,
        pcr_digest,
    );
    Ok(memcmp::eq(attested_pcr, &pcr_digest))
}
const NUM_ATTESTATION_ATTEMPTS: i32 = 5;
fn perform_quote_and_pcr_read(
mut context: &mut Context,
ak_handle: KeyHandle,
nonce: &[u8],
pcrlist: PcrSelectionList,
) -> Result<(TPM2B_ATTEST, Signature, PcrSelectionList, PcrData)> {
let sig_scheme = get_sig_scheme(TpmSigScheme::default())?;
let nonce = nonce.try_into()?;
for attempt in 0..NUM_ATTESTATION_ATTEMPTS {
// TSS ESAPI quote does not create pcr blob, so create it separately
let (pcrs_read, pcr_data) =
make_pcr_blob(&mut context, pcrlist.clone())?;
// create quote
let (attestation, sig) = context.quote(
ak_handle,
&nonce,
sig_scheme,
pcrs_read.clone(),
)?;
// Check whether the attestation and pcr_data match
if (check_if_pcr_data_and_attestation_match(
HashingAlgorithm::Sha256,
&pcr_data,
&attestation,
)?) {
return Ok((attestation, sig, pcrs_read, pcr_data));
}
log::info!(
"PCR data and attestation data mismatched on attempt {}",
attempt
);
}
log::error!("PCR data and attestation data mismatched on all {} attempts, giving up", NUM_ATTESTATION_ATTEMPTS);
Err(KeylimeError::Other(
"Consistent race condition: can't make attestation match pcr_data"
.to_string(),
))
}
// Despite the return type, this function is used for both Identity and
// Integrity Quotes. The Quote handler will add additional information to
// turn an Identity Quote into an Integrity Quote.
pub(crate) fn quote(
nonce: &[u8],
mask: Option<&str>,
data: Data<QuoteData>,
) -> Result<KeylimeIdQuote> {
let hash_alg = get_hash_alg(config_get("cloud_agent", "tpm_hash_alg")?)?;
let nk_digest = pubkey_to_tpm_digest(&data.pub_key)?;
// must unwrap here due to lock mechanism
// https://github.com/rust-lang-nursery/failure/issues/192
let mut context = data.tpmcontext.lock().unwrap(); //#[allow_ci]
let pcrlist = build_pcr_list(&mut context, nk_digest, mask)?;
let (attestation, sig, pcrs_read, pcr_data) = context
.execute_with_nullauth_session(|mut ctx| {
perform_quote_and_pcr_read(
&mut ctx,
data.ak_handle,
nonce,
pcrlist,
)
})?;
let quote = encode_quote_string(attestation, sig, pcrs_read, pcr_data)?;
let mut keylimequote = KeylimeIdQuote::default();
keylimequote.quote.push_str("e);
Ok(keylimequote)
}
#[ignore] // This will only work as an integration test because it needs keylime.conf
#[test]
fn pubkey_to_digest() {
    // Smoke test: digesting a freshly generated RSA-2048 public key succeeds.
    let (key, _) = crate::crypto::rsa_generate_pair(2048).unwrap(); //#[allow_ci]
    let digest = pubkey_to_tpm_digest(&key).unwrap(); //#[allow_ci]
}
#[test]
fn ek_from_hex() {
    // The optional "0x" prefix must not change the parsed handle.
    assert_eq!(
        ek_from_hex_str("0x81000000").unwrap(), //#[allow_ci]
        ek_from_hex_str("81000000").unwrap() //#[allow_ci]
    );
    assert_eq!(
        ek_from_hex_str("0xdeadbeef").unwrap(), //#[allow_ci]
        ek_from_hex_str("deadbeef").unwrap() //#[allow_ci]
    );
    // Short handles parse; non-hex input is rejected.
    assert!(ek_from_hex_str("a").is_ok());
    assert!(ek_from_hex_str("18bb9").is_ok());
    assert!(ek_from_hex_str("qqq").is_err());
    assert!(ek_from_hex_str("0xqqq").is_err());
    assert!(ek_from_hex_str("0xdeadbeefqwerty").is_err());
    // "0x0x0x" trims to the empty string, which fails to parse.
    assert!(ek_from_hex_str("0x0x0x").is_err());
}
#[test]
fn mask() {
    // Each set bit selects the PCR slot at that bit position (LSB = PCR0).
    assert_eq!(read_mask("0x0").unwrap(), vec![]); //#[allow_ci]
    assert_eq!(read_mask("0x1").unwrap(), vec![PcrSlot::Slot0]); //#[allow_ci]
    assert_eq!(read_mask("0x2").unwrap(), vec![PcrSlot::Slot1]); //#[allow_ci]
    assert_eq!(read_mask("0x4").unwrap(), vec![PcrSlot::Slot2]); //#[allow_ci]
    assert_eq!(
        read_mask("0x5").unwrap(), //#[allow_ci]
        vec![PcrSlot::Slot0, PcrSlot::Slot2]
    );
    assert_eq!(
        read_mask("0x6").unwrap(), //#[allow_ci]
        vec![PcrSlot::Slot1, PcrSlot::Slot2]
    );
    assert_eq!(read_mask("0x800000").unwrap(), vec![PcrSlot::Slot23]); //#[allow_ci]
    // All 24 PCRs at once.
    assert_eq!(
        read_mask("0xffffff").unwrap(), //#[allow_ci]
        vec![
            PcrSlot::Slot0,
            PcrSlot::Slot1,
            PcrSlot::Slot2,
            PcrSlot::Slot3,
            PcrSlot::Slot4,
            PcrSlot::Slot5,
            PcrSlot::Slot6,
            PcrSlot::Slot7,
            PcrSlot::Slot8,
            PcrSlot::Slot9,
            PcrSlot::Slot10,
            PcrSlot::Slot11,
            PcrSlot::Slot12,
            PcrSlot::Slot13,
            PcrSlot::Slot14,
            PcrSlot::Slot15,
            PcrSlot::Slot16,
            PcrSlot::Slot17,
            PcrSlot::Slot18,
            PcrSlot::Slot19,
            PcrSlot::Slot20,
            PcrSlot::Slot21,
            PcrSlot::Slot22,
            PcrSlot::Slot23
        ]
    );
    // Bits above PCR23 are rejected.
    assert!(read_mask("0x1ffffff").is_err());
}
| {
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
} | identifier_body |
tpm.rs | // SPDX-License-Identifier: Apache-2.0
// Copyright 2021 Keylime Authors
#[macro_use]
use log::*;
use std::convert::{TryFrom, TryInto};
use std::io::prelude::*;
use std::str::FromStr;
use crate::{
common::config_get, quotes_handler::KeylimeIdQuote,
Error as KeylimeError, QuoteData, Result,
};
use actix_web::web::Data;
use openssl::{
hash::{Hasher, MessageDigest},
memcmp,
pkey::{Id, PKeyRef, Public},
rsa::Rsa,
};
use flate2::{write::ZlibEncoder, Compression};
use tss_esapi::{
abstraction::{ak, cipher::Cipher, ek, DefaultKey},
attributes::session::SessionAttributesBuilder,
constants::{
session_type::SessionType,
tss::{TPM2_ALG_NULL, TPM2_ST_ATTEST_QUOTE},
},
handles::{AuthHandle, KeyHandle, PcrHandle, SessionHandle},
interface_types::{
algorithm::{AsymmetricAlgorithm, HashingAlgorithm, SignatureScheme},
session_handles::AuthSession,
},
structures::{
Digest, DigestValues, EncryptedSecret, IDObject, Name,
PcrSelectionList, PcrSelectionListBuilder, PcrSlot,
},
tcti_ldr::TctiNameConf,
tss2_esys::{
Tss2_MU_TPM2B_PUBLIC_Marshal, Tss2_MU_TPMS_ATTEST_Unmarshal,
Tss2_MU_TPMT_SIGNATURE_Marshal, TPM2B_ATTEST, TPM2B_PUBLIC,
TPML_DIGEST, TPML_PCR_SELECTION, TPMS_ATTEST, TPMS_SCHEME_HASH,
TPMT_SIGNATURE, TPMT_SIG_SCHEME, TPMU_SIG_SCHEME,
},
utils::{PcrData, Signature},
Context,
};
/*
 * Input: None
 * Return: Connection context
 *
 * Example call:
 * let mut ctx = tpm::get_tpm2_ctx();
 *
 * The TCTI environment variable takes precedence; otherwise the in-kernel
 * resource manager device is preferred over the raw TPM device.
 */
pub(crate) fn get_tpm2_ctx() -> Result<Context> {
    let tcti_path = std::env::var("TCTI").unwrap_or_else(|_| {
        let device = if std::path::Path::new("/dev/tpmrm0").exists() {
            "device:/dev/tpmrm0"
        } else {
            "device:/dev/tpm0"
        };
        device.to_string()
    });
    let tcti = TctiNameConf::from_str(&tcti_path)?;
    Context::new(tcti).map_err(|e| e.into())
}
/*
 * Input: Connection context, asymmetric algo (optional)
 * Return: (Key handle, public cert, TPM public object)
 * Example call:
 * let (key, cert, tpm_pub) = tpm::create_ek(context, Some(AsymmetricAlgorithm::Rsa))
 */
pub(crate) fn create_ek(
    context: &mut Context,
    alg: Option<AsymmetricAlgorithm>,
) -> Result<(KeyHandle, Option<Vec<u8>>, Vec<u8>)> {
    // Set encryption algorithm: use the caller's choice, else fall back to
    // the tpm_encryption_alg entry in keylime.conf ("rsa" or "ecc").
    let alg = match alg {
        Some(a) => a,
        None => {
            match config_get(
                "cloud_agent",
                "tpm_encryption_alg",
            )?
            .as_str()
            {
                "rsa" => AsymmetricAlgorithm::Rsa,
                "ecc" => AsymmetricAlgorithm::Ecc,
                _ => return Err(KeylimeError::Configuration(String::from("Encryption algorithm provided in keylime.conf is not supported")))
            }
        }
    };
    // Retrieve EK handle, EK pub cert, and TPM pub object. A missing
    // NVRAM certificate is not fatal; it is reported as None.
    let handle = ek::create_ek_object(context, alg, DefaultKey)?;
    let cert = match ek::retrieve_ek_pubcert(context, alg) {
        Ok(v) => Some(v),
        Err(_) => {
            warn!("No EK certificate found in TPM NVRAM");
            None
        }
    };
    let (tpm_pub, _, _) = context.read_public(handle)?;
    let tpm_pub_vec = pub_to_vec(tpm_pub);
    Ok((handle, cert, tpm_pub_vec))
}
fn unmarshal_tpms_attest(val: &[u8]) -> Result<TPMS_ATTEST> {
let mut resp = TPMS_ATTEST::default();
let mut offset = 0u64;
unsafe {
let res = Tss2_MU_TPMS_ATTEST_Unmarshal(
val[..].as_ptr(),
val.len() as u64,
&mut offset,
&mut resp,
);
if res != 0 {
panic!("Error converting"); //#[allow_ci]
}
}
Ok(resp)
}
// Multiple TPM objects need to be marshaled for Quoting. This macro will
// create the appropriate functions when called below. The macro is intended
// to help with any future similar marshaling functions.
macro_rules! create_marshal_fn {
    ($func:ident, $tpmobj:ty, $marshal:ident) => {
        // Marshals a $tpmobj into its TSS wire representation.
        fn $func(t: $tpmobj) -> Vec<u8> {
            let mut offset = 0u64;
            let size = std::mem::size_of::<$tpmobj>();
            // The marshaled form can never exceed the in-memory struct size.
            let mut tpm_vec = Vec::with_capacity(size);
            unsafe {
                let res = $marshal(
                    &t,
                    tpm_vec.as_mut_ptr(),
                    tpm_vec.capacity() as u64,
                    &mut offset,
                );
                if res != 0 {
                    panic!("out of memory or invalid data from TPM"); //#[allow_ci]
                }
                // offset is a buffer, so after marshaling function is called it holds the
                // number of bytes written to the vector
                tpm_vec.set_len(offset as usize);
            }
            tpm_vec
        }
    };
}
// These marshaling functions use the macro above and are based on this format:
// https://github.com/fedora-iot/clevis-pin-tpm2/blob/main/src/tpm_objects.rs#L64
// ... and on these marshaling functions:
// https://github.com/parallaxsecond/rust-tss-esapi/blob/main/tss-esapi-sys/src/ \
// bindings/x86_64-unknown-linux-gnu.rs#L16010
//
// Functions can be created using the following form:
// create_marshal_fn!(name_of_function_to_create, struct_to_be_marshaled, marshaling_function);
//
// pub_to_vec marshals a TPM2B_PUBLIC; sig_to_vec marshals a TPMT_SIGNATURE.
create_marshal_fn!(pub_to_vec, TPM2B_PUBLIC, Tss2_MU_TPM2B_PUBLIC_Marshal);
create_marshal_fn!(
    sig_to_vec,
    TPMT_SIGNATURE,
    Tss2_MU_TPMT_SIGNATURE_Marshal
);
// Recreate how tpm2-tools creates the PCR out file. Roughly, this is a
// TPML_PCR_SELECTION + number of TPML_DIGESTS + TPML_DIGESTs.
// Reference:
// https://github.com/tpm2-software/tpm2-tools/blob/master/tools/tpm2_quote.c#L47-L91
//
// Note: tpm2-tools does not use its own documented marshaling functions for this output,
// so the below code recreates the idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
    selection_list: PcrSelectionList,
    pcrdata: PcrData,
) -> Vec<u8> {
    let pcrsel: TPML_PCR_SELECTION = selection_list.into();
    // Raw in-memory layout dump; the 132/532 sizes must match the C struct
    // sizes of TPML_PCR_SELECTION and TPML_DIGEST for the transmute to hold.
    let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
    let digest: TPML_DIGEST = pcrdata.into();
    let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
    let mut data_vec =
        Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
    data_vec.extend(&pcrsel_vec);
    // A single little-endian u32 "count of TPML_DIGESTs" sits between the
    // selection and the digests, matching tpm2-tools' output.
    data_vec.extend(&1u32.to_le_bytes());
    data_vec.extend(&digest_vec);
    data_vec
}
/* Converts a hex value in the form of a string (ex. from keylime.conf's
 * ek_handle) to a key handle.
 *
 * Input: &str
 * Return: Key handle
 *
 * Example call:
 * let ek_handle = tpm::ek_from_hex_str("0x81000000");
 */
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
    // trim_start_matches strips ALL leading "0x" occurrences, so "0x0x0x"
    // becomes "" and fails the radix parse (covered by the ek_from_hex test).
    let val = val.trim_start_matches("0x");
    Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
 *
 * Input: Connection context, parent key's KeyHandle.
 * Return: (Key handle, key name, TPM public object as a vector)
 * Example call:
 * let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
 */
pub(crate) fn create_ak(
    ctx: &mut Context,
    handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
    // RSA-SSA / SHA-256 AK with no auth value and the default template.
    let ak = ak::create_ak(
        ctx,
        handle,
        HashingAlgorithm::Sha256,
        SignatureScheme::RsaSsa,
        None,
        DefaultKey,
    )?;
    let ak_tpm2b_pub = ak.out_public;
    let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
    let ak_handle =
        ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
    // read_public yields (public, name, qualified name); keep the name.
    let (_, name, _) = ctx.read_public(ak_handle)?;
    Ok((ak_handle, name, tpm2_pub_vec))
}
// Credential blob magic prefix: 3135029470 == 0xBADCC0DE.
const TSS_MAGIC: u32 = 3135029470;
// Splits a keyblob into (credential, encrypted secret).
// Layout (big-endian): 4-byte magic, 4-byte version (must be 1), 2-byte
// credential size + credential bytes, 2-byte secret size + secret bytes.
// NOTE(review): indexing panics on truncated input, and secretsize is read
// but not used to bound the secret slice — worth hardening.
fn parse_cred_and_secret(
    keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
    let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
    let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
    if magic != TSS_MAGIC {
        return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
    }
    if version != 1 {
        return Err(KeylimeError::Other(format!(
            "Error parsing cred and secret; version {} is not 1",
            version
        )));
    }
    let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
    let secretsize = u16::from_be_bytes(
        keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
            .try_into()
            .unwrap(), //#[allow_ci]
    );
    let credential = &keyblob[10..(10 + credsize as usize)];
    let secret = &keyblob[(12 + credsize as usize)..];
    let credential = IDObject::try_from(credential)?;
    let secret = EncryptedSecret::try_from(secret)?;
    Ok((credential, secret))
}
// Starts an unauthenticated auth session of the given type (AES-128-CFB,
// SHA-256) and sets the encrypt+decrypt session attributes.
fn create_empty_session(
    ctx: &mut Context,
    ses_type: SessionType,
) -> Result<AuthSession> {
    // start_auth_session returns Option<AuthSession>; the unwraps below
    // panic if the TPM returns no session handle.
    let session = ctx.start_auth_session(
        None,
        None,
        None,
        ses_type,
        Cipher::aes_128_cfb().try_into()?,
        HashingAlgorithm::Sha256,
    )?;
    let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
        .with_encrypt(true)
        .with_decrypt(true)
        .build();
    ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
    Ok(session.unwrap()) //#[allow_ci]
}
// Activates a credential blob (as produced by tpm2_makecredential) against
// the given AK and EK, returning the recovered challenge digest.
// The EK handle is flushed before returning, regardless of the outcome.
pub(crate) fn activate_credential(
    ctx: &mut Context,
    keyblob: Vec<u8>,
    ak: KeyHandle,
    ek: KeyHandle,
) -> Result<Digest> {
    let (credential, secret) = parse_cred_and_secret(keyblob)?;
    // Policy session used to authorize use of the EK.
    let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
    // We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
    let _ = ctx.execute_with_nullauth_session(|context| {
        context.policy_secret(
            ek_auth.try_into()?,
            AuthHandle::Endorsement,
            Default::default(),
            Default::default(),
            Default::default(),
            None,
        )
    })?;
    // activate_credential runs with (password session for the AK,
    // policy session for the EK).
    let resp = ctx
        .execute_with_sessions(
            (Some(AuthSession::Password), Some(ek_auth), None),
            |context| context.activate_credential(ak, ek, credential, secret),
        )
        .map_err(KeylimeError::from);
    // Flush the EK context even if activation failed.
    ctx.flush_context(ek.into())?;
    resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file. Only "sha256" and "sha1" are
// supported.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
    match alg.as_str() {
        "sha256" => Ok(HashingAlgorithm::Sha256),
        "sha1" => Ok(HashingAlgorithm::Sha1),
        // NOTE(review): `other` is bound but the message formats `alg`
        // instead, leaving `other` unused.
        other => {
            Err(KeylimeError::Other(format!("{:?} not implemented", alg)))
        }
    }
}
// Signature schemes selectable for quoting. Only TPM2_ALG_NULL is
// implemented; see get_sig_scheme for the TSS struct mapping.
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
    AlgNull,
}
impl Default for TpmSigScheme {
    // Restored the garbled method name: the Default trait requires
    // `fn default`.
    fn default() -> Self {
        TpmSigScheme::AlgNull
    }
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
    scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
    match scheme {
        // The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
        // in the details field.
        TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
            scheme: TPM2_ALG_NULL,
            details: TPMU_SIG_SCHEME {
                any: TPMS_SCHEME_HASH {
                    hashAlg: TPM2_ALG_NULL,
                },
            },
        }),
        // NOTE(review): unreachable — TpmSigScheme has a single variant, so
        // the arm above is already exhaustive.
        _ => Err(KeylimeError::Other(format!(
            "The signature scheme {:?} is not implemented",
            scheme
        ))),
    }
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
    pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> {
    let mut keydigest = DigestValues::new();
    // Only RSA keys are supported; the PEM encoding of the public key is
    // what gets hashed.
    let keybytes = match pubkey.id() {
        Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
        other_id => {
            return Err(KeylimeError::Other(format!(
                "Converting to digest value for key type {:?} is not yet implemented",
                other_id
            )));
        }
    };
    // SHA256 bank (the hashvec bindings are never mutated, so the
    // previous `mut` qualifiers were removed)
    let mut hasher = openssl::sha::Sha256::new();
    hasher.update(&keybytes);
    let hashvec: Vec<u8> = hasher.finish().into();
    keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
    // SHA1 bank
    let mut hasher = openssl::sha::Sha1::new();
    hasher.update(&keybytes);
    let hashvec: Vec<u8> = hasher.finish().into();
    keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
    Ok(keydigest)
}
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000, which translates to 0x408000.
//
// The mask is a string because it is sent as a string from the tenant
// and verifier. The output from this function can be used to call a
// Quote from the TSS ESAPI.
//
pub(crate) fn read_mask(mask: &str) -> Result<Vec<PcrSlot>> {
    let mut pcrs = Vec::new();
    let num = u32::from_str_radix(mask.trim_start_matches("0x"), 16)?;
    // check which bits are set
    for i in 0..32 {
        if num & (1 << i) != 0 {
            pcrs.push(
                match i {
                    0 => PcrSlot::Slot0,
                    1 => PcrSlot::Slot1,
                    2 => PcrSlot::Slot2,
                    3 => PcrSlot::Slot3,
                    4 => PcrSlot::Slot4,
                    5 => PcrSlot::Slot5,
                    6 => PcrSlot::Slot6,
                    7 => PcrSlot::Slot7,
                    8 => PcrSlot::Slot8,
                    9 => PcrSlot::Slot9,
                    10 => PcrSlot::Slot10,
                    11 => PcrSlot::Slot11,
                    12 => PcrSlot::Slot12,
                    13 => PcrSlot::Slot13,
                    14 => PcrSlot::Slot14,
                    15 => PcrSlot::Slot15,
                    16 => PcrSlot::Slot16,
                    17 => PcrSlot::Slot17,
                    18 => PcrSlot::Slot18,
                    19 => PcrSlot::Slot19,
                    20 => PcrSlot::Slot20,
                    21 => PcrSlot::Slot21,
                    22 => PcrSlot::Slot22,
                    23 => PcrSlot::Slot23,
                    // only 24 PCR slots exist; higher set bits are an error
                    bit => return Err(KeylimeError::Other(format!("malformed mask in integrity quote: only pcrs 0-23 can be included, but mask included pcr {:?}", bit))),
                },
            )
        }
    }
    Ok(pcrs)
}
// This encodes a quote string as input to Python Keylime's quote checking functionality.
// The quote, signature, and pcr blob are concatenated with ':' separators. To match the
// expected format, the quote, signature, and pcr blob must be individually compressed
// with zlib at the default compression level and then base64 encoded before concatenation.
//
// Reference:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6 \
// /keylime/tpm/tpm_main.py#L964-L975
pub(crate) fn encode_quote_string(
    att: TPM2B_ATTEST,
    sig: Signature,
    pcrs_read: PcrSelectionList,
    pcr_data: PcrData,
) -> Result<String> {
    // marshal structs to vec in expected formats. these formats are
    // dictated by tpm2_tools.
    let att_vec = &att.attestationData[0..att.size as usize];
    let sig_vec = sig_to_vec(sig.try_into()?);
    let pcr_vec = pcrdata_to_vec(pcrs_read, pcr_data);
    // zlib compression. write_all errors were previously ignored and are
    // now propagated to the caller.
    let mut att_comp = ZlibEncoder::new(Vec::new(), Compression::default());
    att_comp.write_all(att_vec)?;
    let att_comp_finished = att_comp.finish()?;
    let mut sig_comp = ZlibEncoder::new(Vec::new(), Compression::default());
    sig_comp.write_all(&sig_vec)?;
    let sig_comp_finished = sig_comp.finish()?;
    let mut pcr_comp = ZlibEncoder::new(Vec::new(), Compression::default());
    pcr_comp.write_all(&pcr_vec)?;
    let pcr_comp_finished = pcr_comp.finish()?;
    // base64 encoding
    let att_str = base64::encode(att_comp_finished);
    let sig_str = base64::encode(sig_comp_finished);
    let pcr_str = base64::encode(pcr_comp_finished);
    // join the three encoded parts with ':' separators
    let quote = format!("{}:{}:{}", att_str, sig_str, pcr_str);
    Ok(quote)
}
// This function extends Pcr16 with the digest, then creates a PcrList
// from the given mask and pcr16.
// Note: Currently, this will build the list for both SHA256 and SHA1 as
// necessary for the Python components of Keylime.
pub(crate) fn build_pcr_list(
    context: &mut Context,
    digest: DigestValues,
    mask: Option<&str>,
) -> Result<PcrSelectionList> {
    // extend digest into pcr16
    context.execute_with_nullauth_session(|ctx| {
        ctx.pcr_reset(PcrHandle::Pcr16)?;
        ctx.pcr_extend(PcrHandle::Pcr16, digest.to_owned())
    })?;
    // translate mask to vec of pcrs
    let mut pcrs = match mask {
        Some(m) => read_mask(m)?,
        None => Vec::new(),
    };
    // add pcr16 if it isn't in the vec already
    if !pcrs.iter().any(|&pcr| pcr == PcrSlot::Slot16) {
        let mut slot16 = vec![PcrSlot::Slot16];
        pcrs.append(&mut slot16);
    }
    let ima_pcr_index = pcrs.iter().position(|&pcr| pcr == PcrSlot::Slot10);
    let mut pcrlist = PcrSelectionListBuilder::new();
    // remove IMA pcr before selecting for sha256 bank
    if let Some(ima_pcr_index) = ima_pcr_index {
        let _ = pcrs.remove(ima_pcr_index);
        // add only IMA pcr (pcr10) for sha1 bank, as the Python side expects
        let mut sha1_pcrs = vec![PcrSlot::Slot10];
        pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha1, &sha1_pcrs);
    }
    pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha256, &pcrs);
    let pcrlist = pcrlist.build();
    Ok(pcrlist)
}
// The pcr blob corresponds to the pcr out file that records the list of PCR values,
// specified by tpm2tools, ex. 'tpm2_quote ... -o <pcrfilename>'. Read more here:
// https://github.com/tpm2-software/tpm2-tools/blob/master/man/tpm2_quote.1.md
//
// It is required by Python Keylime's check_quote functionality. For how the quote is
// checked, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L990
//
// For how the quote is created, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L965
//
// Errors unless every requested PCR was returned by the TPM.
pub(crate) fn make_pcr_blob(
    context: &mut Context,
    pcrlist: PcrSelectionList,
) -> Result<(PcrSelectionList, PcrData)> {
    let (_, pcrs_read, pcr_data) =
        context.execute_without_session(|ctx| ctx.pcr_read(&pcrlist))?;
    if pcrs_read != pcrlist {
        return Err(KeylimeError::Other(format!(
            "could not read all pcrs; requested: {:?}, read: {:?}",
            pcrlist, pcrs_read
        )));
    }
    Ok((pcrs_read, pcr_data))
}
// Takes a TSS ESAPI HashingAlgorithm and returns the corresponding OpenSSL
// MessageDigest. Only SHA-256 and SHA-1 are supported.
fn hash_alg_to_message_digest(
    hash_alg: HashingAlgorithm,
) -> Result<MessageDigest> {
    match hash_alg {
        HashingAlgorithm::Sha256 => Ok(MessageDigest::sha256()),
        HashingAlgorithm::Sha1 => Ok(MessageDigest::sha1()),
        other => Err(KeylimeError::Other(format!(
            "Unsupported hashing algorithm: {:?}",
            other
        ))),
    }
}
// Checks whether the PCR digest embedded in the quote attestation matches
// the digest computed over the separately read PCR values, using the given
// hash algorithm. Comparison is done with openssl memcmp::eq.
fn check_if_pcr_data_and_attestation_match(
    hash_algo: HashingAlgorithm,
    pcr_data: &PcrData,
    attestation: &TPM2B_ATTEST,
) -> Result<bool> {
    let pcr_data = TPML_DIGEST::try_from(pcr_data.clone())?;
    let attestation = unmarshal_tpms_attest(&attestation.attestationData)?;
    if attestation.type_ != TPM2_ST_ATTEST_QUOTE {
        return Err(KeylimeError::Other(format!(
            "Expected attestation type TPM2_ST_ATTEST_QUOTE, got {:?}",
            attestation.type_
        )));
    }
    // attested is a C union; reading the quote member is only valid after
    // the type check above.
    let quote = unsafe { attestation.attested.quote };
    let attested_pcr = quote.pcrDigest;
    let attested_pcr = &attested_pcr.buffer[..attested_pcr.size as usize];
    // Hash the concatenation of all read PCR values.
    let mut hasher = Hasher::new(hash_alg_to_message_digest(hash_algo)?)?;
    for i in 0..pcr_data.count {
        let pcr = pcr_data.digests[i as usize];
        hasher.update(&pcr.buffer[..pcr.size as usize])?;
    }
    let pcr_digest = hasher.finish()?;
    log::trace!(
        "Attested to PCR digest: {:?}, read PCR digest: {:?}",
        attested_pcr,
        pcr_digest,
    );
    Ok(memcmp::eq(attested_pcr, &pcr_digest))
}
// Number of retries when the separately read PCR values do not match the
// ones covered by the quote (i.e. the PCRs changed mid-operation).
const NUM_ATTESTATION_ATTEMPTS: i32 = 5;
// Produces a TPM quote over `pcrlist` plus the corresponding PCR blob,
// retrying up to NUM_ATTESTATION_ATTEMPTS times until the quote and the
// separately read PCR data agree (compared via a SHA-256 composite digest).
fn perform_quote_and_pcr_read(
    mut context: &mut Context,
    ak_handle: KeyHandle,
    nonce: &[u8],
    pcrlist: PcrSelectionList,
) -> Result<(TPM2B_ATTEST, Signature, PcrSelectionList, PcrData)> {
    let sig_scheme = get_sig_scheme(TpmSigScheme::default())?;
    let nonce = nonce.try_into()?;
    for attempt in 0..NUM_ATTESTATION_ATTEMPTS {
        // TSS ESAPI quote does not create pcr blob, so create it separately
        let (pcrs_read, pcr_data) =
            make_pcr_blob(&mut context, pcrlist.clone())?;
        // create quote
        let (attestation, sig) = context.quote(
            ak_handle,
            &nonce,
            sig_scheme,
            pcrs_read.clone(),
        )?;
        // Check whether the attestation and pcr_data match
        if (check_if_pcr_data_and_attestation_match(
            HashingAlgorithm::Sha256,
            &pcr_data,
            &attestation,
        )?) {
            return Ok((attestation, sig, pcrs_read, pcr_data));
        }
        log::info!(
            "PCR data and attestation data mismatched on attempt {}",
            attempt
        );
    }
    log::error!("PCR data and attestation data mismatched on all {} attempts, giving up", NUM_ATTESTATION_ATTEMPTS);
    Err(KeylimeError::Other(
        "Consistent race condition: can't make attestation match pcr_data"
            .to_string(),
    ))
}
// Despite the return type, this function is used for both Identity and
// Integrity Quotes. The Quote handler will add additional information to
// turn an Identity Quote into an Integrity Quote.
pub(crate) fn quote(
nonce: &[u8],
mask: Option<&str>,
data: Data<QuoteData>,
) -> Result<KeylimeIdQuote> {
let hash_alg = get_hash_alg(config_get("cloud_agent", "tpm_hash_alg")?)?;
let nk_digest = pubkey_to_tpm_digest(&data.pub_key)?;
// must unwrap here due to lock mechanism
// https://github.com/rust-lang-nursery/failure/issues/192
let mut context = data.tpmcontext.lock().unwrap(); //#[allow_ci]
let pcrlist = build_pcr_list(&mut context, nk_digest, mask)?;
let (attestation, sig, pcrs_read, pcr_data) = context
.execute_with_nullauth_session(|mut ctx| {
perform_quote_and_pcr_read(
&mut ctx,
data.ak_handle,
nonce,
pcrlist,
)
})?;
let quote = encode_quote_string(attestation, sig, pcrs_read, pcr_data)?;
let mut keylimequote = KeylimeIdQuote::default();
keylimequote.quote.push_str("e);
Ok(keylimequote)
}
#[ignore] // This will only work as an integration test because it needs keylime.conf
#[test]
fn pubkey_to_digest() {
    // Smoke test: digesting a freshly generated RSA public key must succeed.
    let (key, _) = crate::crypto::rsa_generate_pair(2048).unwrap(); //#[allow_ci]
    let digest = pubkey_to_tpm_digest(&key).unwrap(); //#[allow_ci]
}
#[test]
fn ek_from_hex() {
    // The "0x" prefix must be optional and not change the parsed value.
    assert_eq!(
        ek_from_hex_str("0x81000000").unwrap(), //#[allow_ci]
        ek_from_hex_str("81000000").unwrap() //#[allow_ci]
    );
    assert_eq!(
        ek_from_hex_str("0xdeadbeef").unwrap(), //#[allow_ci]
        ek_from_hex_str("deadbeef").unwrap() //#[allow_ci]
    );
    // Odd-length and short hex strings are accepted.
    assert!(ek_from_hex_str("a").is_ok());
    assert!(ek_from_hex_str("18bb9").is_ok());
    // Non-hex characters and repeated prefixes must be rejected.
    assert!(ek_from_hex_str("qqq").is_err());
    assert!(ek_from_hex_str("0xqqq").is_err());
    assert!(ek_from_hex_str("0xdeadbeefqwerty").is_err());
    assert!(ek_from_hex_str("0x0x0x").is_err());
}
#[test]
fn mask() {
    // Each bit N of the mask selects PCR slot N; 0x0 selects nothing.
    assert_eq!(read_mask("0x0").unwrap(), vec![]); //#[allow_ci]
    assert_eq!(read_mask("0x1").unwrap(), vec![PcrSlot::Slot0]); //#[allow_ci]
    assert_eq!(read_mask("0x2").unwrap(), vec![PcrSlot::Slot1]); //#[allow_ci]
    assert_eq!(read_mask("0x4").unwrap(), vec![PcrSlot::Slot2]); //#[allow_ci]
    // Multi-bit masks select multiple slots.
    assert_eq!(
        read_mask("0x5").unwrap(), //#[allow_ci]
        vec![PcrSlot::Slot0, PcrSlot::Slot2]
    );
    assert_eq!(
        read_mask("0x6").unwrap(), //#[allow_ci]
        vec![PcrSlot::Slot1, PcrSlot::Slot2]
    );
    assert_eq!(read_mask("0x800000").unwrap(), vec![PcrSlot::Slot23]); //#[allow_ci]
    // 0xffffff selects all 24 PCR slots.
    assert_eq!(
        read_mask("0xffffff").unwrap(), //#[allow_ci]
        vec![
            PcrSlot::Slot0,
            PcrSlot::Slot1,
            PcrSlot::Slot2,
            PcrSlot::Slot3,
            PcrSlot::Slot4,
            PcrSlot::Slot5,
            PcrSlot::Slot6,
            PcrSlot::Slot7,
            PcrSlot::Slot8,
            PcrSlot::Slot9,
            PcrSlot::Slot10,
            PcrSlot::Slot11,
            PcrSlot::Slot12,
            PcrSlot::Slot13,
            PcrSlot::Slot14,
            PcrSlot::Slot15,
            PcrSlot::Slot16,
            PcrSlot::Slot17,
            PcrSlot::Slot18,
            PcrSlot::Slot19,
            PcrSlot::Slot20,
            PcrSlot::Slot21,
            PcrSlot::Slot22,
            PcrSlot::Slot23
        ]
    );
    // Any bit above slot 23 is invalid.
    assert!(read_mask("0x1ffffff").is_err());
}
| default | identifier_name |
size_cache_fs.go | package kafero
import (
"encoding/json"
"fmt"
"github.com/wangjia184/sortedset"
"io"
"math"
"os"
"path/filepath"
"sync"
"syscall"
"time"
)
// SizeCacheFS is a caching file system composed of a cache layer on top of a
// base layer. The cache layer has a maximum size, and files are evicted from
// it based on their last-use time (read or write).
// Any modification to a file is applied to both the base and the cache layer
// — even when the cached copy is stale (invalidated) — since unconditionally
// keeping the two in sync is simpler than tracking validity.
// cacheFile is a cache-index entry describing one cached regular file.
type cacheFile struct {
	Path string // path relative to the cache layer root
	Size int64  // size in bytes when the file was indexed
	// LastAccessTime is a Unix timestamp (sub-millisecond units; see call
	// sites) used as the eviction score — the lowest (oldest) entry is
	// evicted first.
	LastAccessTime int64
}
// SizeCacheFS layers a size-bounded cache filesystem on top of a base
// filesystem; least-recently-used files are evicted from the cache once its
// total size would exceed cacheSize.
type SizeCacheFS struct {
	base      Fs            // authoritative backing filesystem
	cache     Fs            // size-bounded cache layer
	cacheSize int64         // maximum total bytes kept in the cache layer
	cacheTime time.Duration // cached copies older than this are re-checked against base (0 = never stale)
	currSize  int64         // current total size of indexed entries; guarded by cacheL
	files     *sortedset.SortedSet // cache index keyed by path, scored by LastAccessTime
	cacheL    sync.Mutex    // protects files and currSize
}
// NewSizeCacheFS creates a SizeCacheFS that caches files from base in cache,
// evicting least-recently-used entries once the cache layer grows beyond
// cacheSize bytes. cacheTime controls staleness checks (see cacheStatus).
// The cache index is restored from ".cacheindex" when present; otherwise it
// is rebuilt by walking the cache layer.
func NewSizeCacheFS(base Fs, cache Fs, cacheSize int64, cacheTime time.Duration) (*SizeCacheFS, error) {
	if cacheSize < 0 {
		cacheSize = 0
	}
	exists, err := Exists(cache, ".cacheindex")
	if err != nil {
		return nil, fmt.Errorf("error determining if cache index exists: %v", err)
	}
	var files []*cacheFile
	if !exists {
		// No persisted index: rebuild it by scanning every file currently in
		// the cache layer, using the file mtime as its last-access score.
		err := Walk(cache, "", func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() {
				file := &cacheFile{
					Path: path,
					Size: info.Size(),
					// BUGFIX: use UnixNano/1000 (microseconds) to match the
					// unit stored by copyToCache, OpenFile and Create; the
					// previous /1000000 (milliseconds) made rebuilt entries
					// sort as far older than freshly cached ones.
					LastAccessTime: info.ModTime().UnixNano() / 1000,
				}
				files = append(files, file)
			}
			return nil
		})
		if err != nil {
			return nil, fmt.Errorf("error building cache index: %v", err)
		}
	} else {
		data, err := ReadFile(cache, ".cacheindex")
		if err != nil {
			return nil, fmt.Errorf("error reading cache index: %v", err)
		}
		if err := json.Unmarshal(data, &files); err != nil {
			return nil, fmt.Errorf("error unmarshalling files: %v", err)
		}
	}
	// Load the index into the sorted set and compute the current usage.
	var currSize int64 = 0
	set := sortedset.New()
	for _, f := range files {
		set.AddOrUpdate(f.Path, sortedset.SCORE(f.LastAccessTime), f)
		currSize += f.Size
	}
	fs := &SizeCacheFS{
		base:      base,
		cache:     cache,
		cacheSize: cacheSize,
		cacheTime: cacheTime,
		currSize:  currSize,
		files:     set,
	}
	return fs, nil
}
// getCacheFile returns the index entry for name, or nil when the file is not
// tracked by the cache index.
func (u *SizeCacheFS) getCacheFile(name string) *cacheFile {
	u.cacheL.Lock()
	defer u.cacheL.Unlock()
	if entry := u.files.GetByKey(name); entry != nil {
		return entry.Value.(*cacheFile)
	}
	return nil
}
// addToCache inserts (or re-inserts) info into the cache index, evicting the
// least-recently-used entries until the new file fits within cacheSize.
// Evicted files are removed from the cache layer, and directories left empty
// by an eviction are pruned bottom-up.
func (u *SizeCacheFS) addToCache(info *cacheFile) error {
	u.cacheL.Lock()
	defer u.cacheL.Unlock()
	// If the entry is already indexed, discount its old size; it is re-added
	// with its new size below.
	node := u.files.GetByKey(info.Path)
	if node != nil {
		file := node.Value.(*cacheFile)
		u.currSize -= file.Size
	}
	// Evict oldest entries while the cache is non-empty and adding this file
	// would exceed the budget.
	for u.currSize > 0 && u.currSize+info.Size > u.cacheSize {
		node := u.files.PopMin()
		// node CAN'T be nil as currSize > 0
		file := node.Value.(*cacheFile)
		if err := u.cache.Remove(file.Path); err != nil {
			return fmt.Errorf("error removing cache file: %v", err)
		}
		u.currSize -= file.Size
		// Walk up the tree removing directories emptied by the eviction,
		// stopping at the first non-empty one.
		path := filepath.Dir(file.Path)
		for path != "" && path != "." && path != "/" {
			f, err := u.cache.Open(path)
			if err != nil {
				// BUGFIX: do not call f.Close() here — f is nil when Open
				// fails, and Close on a nil File interface panics.
				return fmt.Errorf("error opening parent directory: %v", err)
			}
			dirs, err := f.Readdir(-1)
			_ = f.Close()
			if err != nil {
				return fmt.Errorf("error reading parent directory: %v", err)
			}
			if len(dirs) == 0 {
				if err := u.cache.Remove(path); err != nil {
					return fmt.Errorf("error removing parent directory: %v", err)
				}
				path = filepath.Dir(path)
			} else {
				break
			}
		}
	}
	u.files.AddOrUpdate(info.Path, sortedset.SCORE(info.LastAccessTime), info)
	u.currSize += info.Size
	return nil
}
// removeFromCache drops name from the cache index (if present) and subtracts
// its size from the running total. The cached file itself is not touched.
func (u *SizeCacheFS) removeFromCache(name string) {
	u.cacheL.Lock()
	defer u.cacheL.Unlock()
	entry := u.files.GetByKey(name)
	if entry == nil {
		return
	}
	// An open file removed here re-registers itself in the index when it is
	// closed; that re-appearance on close is the intended behavior.
	u.files.Remove(name)
	u.currSize -= entry.Value.(*cacheFile).Size
}
/*
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
*/
// cacheStatus classifies name relative to the cache layer: cacheMiss (not
// cached), cacheHit (cached and fresh), cacheStale (base copy is newer than
// a cached copy older than cacheTime), or cacheLocal (cached copy exists but
// the base cannot be stat'ed). With cacheTime == 0 a cached copy is always a
// hit.
func (u *SizeCacheFS) cacheStatus(name string) (cacheState, os.FileInfo, error) {
	lfi, err := u.cache.Stat(name)
	if err != nil {
		if err == syscall.ENOENT || os.IsNotExist(err) {
			return cacheMiss, nil, nil
		}
		return cacheMiss, nil, err
	}
	if u.cacheTime == 0 {
		return cacheHit, lfi, nil
	}
	// Only copies older than cacheTime are re-validated against the base.
	if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
		bfi, berr := u.base.Stat(name)
		if berr != nil {
			return cacheLocal, lfi, nil
		}
		if bfi.ModTime().After(lfi.ModTime()) {
			return cacheStale, bfi, nil
		}
	}
	return cacheHit, lfi, nil
}
// copyToCache copies name from the base layer into the cache layer and
// returns an index entry for it (nil, nil for directories). The caller is
// responsible for registering the returned entry via addToCache.
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
	bfh, err := u.base.Open(name)
	if err != nil {
		// BUGFIX: the original compared err == os.ErrNotExist, which never
		// matches the *PathError typically returned by Open; use
		// os.IsNotExist so callers can detect a missing base file.
		if os.IsNotExist(err) {
			return nil, err
		}
		return nil, fmt.Errorf("error opening base file: %v", err)
	}
	// Make sure the parent directory exists on the cache layer.
	exists, err := Exists(u.cache, filepath.Dir(name))
	if err != nil {
		_ = bfh.Close() // BUGFIX: don't leak the base handle on error
		return nil, err
	}
	if !exists {
		if err := u.cache.MkdirAll(filepath.Dir(name), 0777); err != nil { // FIXME?
			_ = bfh.Close()
			return nil, err
		}
	}
	// Create the cache copy and stream the base contents over.
	lfh, err := u.cache.Create(name)
	if err != nil {
		_ = bfh.Close()
		return nil, err
	}
	n, err := io.Copy(lfh, bfh)
	if err != nil {
		// On failure, drop the partial cache copy.
		_ = u.cache.Remove(name)
		_ = lfh.Close()
		_ = bfh.Close()
		// BUGFIX: message direction was reversed ("layer to base").
		return nil, fmt.Errorf("error copying base file to cache: %v", err)
	}
	bfi, err := bfh.Stat()
	if err != nil || bfi.Size() != n {
		// Short copy or stat failure: the cache copy cannot be trusted.
		_ = u.cache.Remove(name)
		_ = lfh.Close()
		_ = bfh.Close()
		return nil, syscall.EIO
	}
	isDir := bfi.IsDir()
	if err := lfh.Close(); err != nil {
		_ = u.cache.Remove(name)
		_ = bfh.Close()
		return nil, err
	}
	if err := bfh.Close(); err != nil {
		return nil, fmt.Errorf("error closing base file: %v", err)
	}
	// Mirror the base modification time so staleness checks compare equal.
	if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
		return nil, err
	}
	// Directories are copied on disk but never indexed/evicted, so only
	// regular files get an index entry.
	if isDir {
		return nil, nil
	}
	return &cacheFile{
		Path:           name,
		Size:           bfi.Size(),
		LastAccessTime: time.Now().UnixNano() / 1000, // microseconds
	}, nil
}
// Chtimes sets access and modification times on the base layer, first
// mirroring the change onto the cache layer (best effort) when a cached
// copy is present, so the two layers stay consistent.
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
	cached, err := Exists(u.cache, name)
	if err != nil {
		return err
	}
	if cached {
		// Cache-layer failures are deliberately ignored; base is authoritative.
		_ = u.cache.Chtimes(name, atime, mtime)
	}
	return u.base.Chtimes(name, atime, mtime)
}
// Chmod changes the mode on the base layer, first mirroring the change onto
// the cache layer (best effort) when a cached copy is present.
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
	cached, err := Exists(u.cache, name)
	if err != nil {
		return err
	}
	if cached {
		// Cache-layer failures are deliberately ignored; base is authoritative.
		_ = u.cache.Chmod(name, mode)
	}
	return u.base.Chmod(name, mode)
}
// Stat always reports from the base layer, which is authoritative for
// metadata.
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
	return u.base.Stat(name)
}
// Rename moves a file on the base layer and, when a cached copy exists,
// renames it on the cache layer and re-keys its index entry.
func (u *SizeCacheFS) Rename(oldname, newname string) error {
	exists, err := Exists(u.cache, oldname)
	if err != nil {
		return err
	}
	// If cache file exists, update to ensure consistency
	if exists {
		// BUGFIX: the path may exist on the cache layer without being in the
		// index (directories are never indexed), so guard against a nil
		// entry; the original dereferenced it unconditionally and could
		// panic.
		if info := u.getCacheFile(oldname); info != nil {
			u.removeFromCache(oldname)
			info.Path = newname
			if err := u.addToCache(info); err != nil {
				return err
			}
		}
		if err := u.cache.Rename(oldname, newname); err != nil {
			return err
		}
	}
	return u.base.Rename(oldname, newname)
}
// Remove deletes name from the base layer; if a cached copy exists it is
// deleted from the cache layer and its index entry is dropped first.
func (u *SizeCacheFS) Remove(name string) error {
	exists, err := Exists(u.cache, name)
	if err != nil {
		return fmt.Errorf("error determining if file exists: %v", err)
	}
	// If cache file exists, update to ensure consistency
	if exists {
		if err := u.cache.Remove(name); err != nil {
			return fmt.Errorf("error removing cache file: %v", err)
		}
		u.removeFromCache(name)
	}
	return u.base.Remove(name)
}
// RemoveAll removes name and all children from both layers. Cached regular
// files are removed one by one via u.Remove so their index entries are also
// dropped; any remaining cached directories are then removed wholesale.
func (u *SizeCacheFS) RemoveAll(name string) error {
	exists, err := Exists(u.cache, name)
	if err != nil {
		return err
	}
	// If cache file exists, update to ensure consistency
	if exists {
		err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() {
				// u.Remove also deletes the base copy and the index entry.
				return u.Remove(path)
			} else {
				return nil
			}
		})
		if err != nil {
			return err
		}
		// Remove the dirs
		_ = u.cache.RemoveAll(name)
	}
	return u.base.RemoveAll(name)
}
// OpenFile opens name on both layers and returns a SizeCacheFile wrapping
// the two handles. A missing or stale cache copy is (re)populated from the
// base first; writes through the returned file go to both layers.
func (u *SizeCacheFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	// Very important, remove from cache to prevent eviction while opening
	info := u.getCacheFile(name)
	if info != nil {
		u.removeFromCache(name)
	}
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}
	switch st {
	case cacheLocal, cacheHit:
		// Cached copy is usable as-is.
	default:
		// cacheMiss or cacheStale: refresh the cache copy from the base.
		exists, err := Exists(u.base, name)
		if err != nil {
			return nil, fmt.Errorf("error determining if base file exists: %v", err)
		}
		if exists {
			var err error
			info, err = u.copyToCache(name)
			if err != nil {
				return nil, err
			}
		} else {
			// It is not a dir, we cannot open a non existing dir
			// New file: start with an empty index entry; the real size is
			// recorded when the file is closed.
			info = &cacheFile{
				Path: name,
				Size: 0,
				LastAccessTime: time.Now().UnixNano() / 1000,
			}
		}
	}
	// Any write-capable mode on the caller's flags forces the cache handle
	// to read-write so the wrapper can keep both copies in sync.
	// NOTE(review): os.O_* and syscall.O_* are mixed here — presumably they
	// share values on supported platforms; confirm.
	var cacheFlag = flag
	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
		// Force read write mode
		cacheFlag = (flag & (^os.O_WRONLY)) | os.O_RDWR
	}
	bfi, err := u.base.OpenFile(name, flag, perm)
	if err != nil {
		return nil, err
	}
	lfi, err := u.cache.OpenFile(name, cacheFlag, perm)
	if err != nil {
		bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
		return nil, err
	}
	uf := NewSizeCacheFile(bfi, lfi, flag, u, info)
	return uf, nil
}
func (u *SizeCacheFS) Open(name string) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if !bfi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
case cacheStale:
if !fi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
}
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
lfile, err := u.cache.Open(name)
if err != nil && bfile == nil {
return nil, err
}
fi, err = u.cache.Stat(name)
if err != nil |
uf := NewSizeCacheFile(bfile, lfile, os.O_RDONLY, u, info)
return uf, nil
}
// Mkdir creates the directory on the base layer, then ensures it exists on
// the cache layer too (MkdirAll, since parent dirs may be absent there).
func (u *SizeCacheFS) Mkdir(name string, perm os.FileMode) error {
	if err := u.base.Mkdir(name, perm); err != nil {
		return err
	}
	return u.cache.MkdirAll(name, perm)
}
// Name returns the name of this filesystem implementation.
func (u *SizeCacheFS) Name() string {
	return "SizeCacheFS"
}
// MkdirAll creates the directory tree on the base layer and mirrors it on
// the cache layer.
func (u *SizeCacheFS) MkdirAll(name string, perm os.FileMode) error {
	if err := u.base.MkdirAll(name, perm); err != nil {
		return err
	}
	return u.cache.MkdirAll(name, perm)
}
// Create creates name on both layers and returns a SizeCacheFile wrapping
// both handles. The entry starts at size 0; per the note in removeFromCache,
// the file re-registers itself in the index when it is closed.
func (u *SizeCacheFS) Create(name string) (File, error) {
	bfile, err := u.base.Create(name)
	if err != nil {
		return nil, err
	}
	lfile, err := u.cache.Create(name)
	if err != nil {
		// oops, see comment about OS_TRUNC above, should we remove? then we have to
		// remember if the file did not exist before
		_ = bfile.Close()
		return nil, err
	}
	info := &cacheFile{
		Path: name,
		Size: 0,
		LastAccessTime: time.Now().UnixNano() / 1000, // microseconds
	}
	// Ensure file is out
	// (prevents the index entry from being evicted while the file is open)
	u.removeFromCache(name)
	uf := NewSizeCacheFile(bfile, lfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, u, info)
	return uf, nil
}
// Size returns the current total size in bytes of all indexed cache entries.
// BUGFIX: the read is done under cacheL — currSize is mutated by addToCache
// and removeFromCache under the same lock, so an unsynchronized read was a
// data race.
func (u *SizeCacheFS) Size() int64 {
	u.cacheL.Lock()
	defer u.cacheL.Unlock()
	return u.currSize
}
// Close persists the cache index (every cacheFile entry, in score order) to
// ".cacheindex" on the cache layer so a later NewSizeCacheFS can restore it.
// NOTE(review): open files are not tracked or closed here (see TODO), and
// the index is read without holding cacheL — confirm Close is only called
// once no other goroutine is using the filesystem.
func (u *SizeCacheFS) Close() error {
	// TODO close all open files
	// Save index
	var files []*cacheFile
	nodes := u.files.GetByScoreRange(math.MinInt64, math.MaxInt64, nil)
	for _, n := range nodes {
		f := n.Value.(*cacheFile)
		files = append(files, f)
	}
	data, err := json.Marshal(files)
	if err != nil {
		return fmt.Errorf("error marshalling files: %v", err)
	}
	if err := WriteFile(u.cache, ".cacheindex", data, 0644); err != nil {
		return fmt.Errorf("error writing cache index: %v", err)
	}
	return nil
}
| {
return nil, err
} | conditional_block |
size_cache_fs.go | package kafero
import (
"encoding/json"
"fmt"
"github.com/wangjia184/sortedset"
"io"
"math"
"os"
"path/filepath"
"sync"
"syscall"
"time"
)
// The SizeCacheFS is a cache file system composed of a cache layer and a base layer
// the cache layer has a maximal size, and files get evicted relative to their
// last use time (read or edited).
// If you change something on the file, need to change on base and cache
// even if cache is stale (invalidated), easier to just do it
type cacheFile struct {
Path string
Size int64
LastAccessTime int64
}
type SizeCacheFS struct {
base Fs
cache Fs
cacheSize int64
cacheTime time.Duration
currSize int64
files *sortedset.SortedSet
cacheL sync.Mutex
}
func NewSizeCacheFS(base Fs, cache Fs, cacheSize int64, cacheTime time.Duration) (*SizeCacheFS, error) {
if cacheSize < 0 {
cacheSize = 0
}
exists, err := Exists(cache, ".cacheindex")
if err != nil {
return nil, fmt.Errorf("error determining if cache index exists: %v", err)
}
var files []*cacheFile
if !exists {
err := Walk(cache, "", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
file := &cacheFile{
Path: path,
Size: info.Size(),
LastAccessTime: info.ModTime().UnixNano() / 1000000,
}
files = append(files, file)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("error building cache index: %v", err)
}
} else {
data, err := ReadFile(cache, ".cacheindex")
if err != nil {
return nil, fmt.Errorf("error reading cache index: %v", err)
}
if err := json.Unmarshal(data, &files); err != nil {
return nil, fmt.Errorf("error unmarshalling files: %v", err)
}
}
var currSize int64 = 0
set := sortedset.New()
for _, f := range files {
set.AddOrUpdate(f.Path, sortedset.SCORE(f.LastAccessTime), f)
currSize += f.Size
}
fs := &SizeCacheFS{
base: base,
cache: cache,
cacheSize: cacheSize,
cacheTime: cacheTime,
currSize: currSize,
files: set,
}
return fs, nil
}
func (u *SizeCacheFS) getCacheFile(name string) (info *cacheFile) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node == nil {
return nil
} else {
return node.Value.(*cacheFile)
}
}
func (u *SizeCacheFS) addToCache(info *cacheFile) error {
u.cacheL.Lock()
defer u.cacheL.Unlock()
// check if we aren't already inside
node := u.files.GetByKey(info.Path)
if node != nil {
file := node.Value.(*cacheFile)
u.currSize -= file.Size
}
// while we can pop files and the cache is full..
for u.currSize > 0 && u.currSize+info.Size > u.cacheSize {
node := u.files.PopMin()
// node CAN'T be nil as currSize > 0
file := node.Value.(*cacheFile)
if err := u.cache.Remove(file.Path); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.currSize -= file.Size
path := filepath.Dir(file.Path)
for path != "" && path != "." && path != "/" {
f, err := u.cache.Open(path)
if err != nil {
_ = f.Close()
return fmt.Errorf("error opening parent directory: %v", err)
}
dirs, err := f.Readdir(-1)
if err != nil {
_ = f.Close()
return fmt.Errorf("error reading parent directory: %v", err)
}
_ = f.Close()
if len(dirs) == 0 {
if err := u.cache.Remove(path); err != nil {
return fmt.Errorf("error removing parent directory: %v", err)
}
path = filepath.Dir(path)
} else {
break
}
}
}
u.files.AddOrUpdate(info.Path, sortedset.SCORE(info.LastAccessTime), info)
u.currSize += info.Size
return nil
}
func (u *SizeCacheFS) removeFromCache(name string) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node != nil {
// If we remove file that is open, the file will re-add itself in
// the cache on close. This is expected behavior as a removed open file
// will re-appear on close ?
u.files.Remove(name)
info := node.Value.(*cacheFile)
u.currSize -= info.Size
}
}
/*
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
*/
func (u *SizeCacheFS) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.cache.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
} else if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
} else {
return cacheMiss, nil, err
}
}
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
// If layer file exists, we need to remove it
// and replace it with current file
// TODO
// Get size, if size over our limit, evict one file
bfh, err := u.base.Open(name)
if err != nil {
if err == os.ErrNotExist {
return nil, err
} else {
return nil, fmt.Errorf("error opening base file: %v", err)
}
}
// First make sure the directory exists
exists, err := Exists(u.cache, filepath.Dir(name))
if err != nil {
return nil, err
}
if !exists {
err = u.cache.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return nil, err
}
}
// Create the file on the overlay
lfh, err := u.cache.Create(name)
if err != nil {
return nil, err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, fmt.Errorf("error copying layer to base: %v", err)
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, syscall.EIO
}
isDir := bfi.IsDir()
err = lfh.Close()
if err != nil {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, err
}
if err := bfh.Close(); err != nil {
return nil, fmt.Errorf("error closing base file: %v", err)
}
if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
return nil, err
}
// if cache is stale and file already inside sorted set, we are just going to update it
// Create info
if !isDir {
info := &cacheFile{
Path: name,
Size: bfi.Size(),
LastAccessTime: time.Now().UnixNano() / 1000,
}
return info, nil
} else {
return nil, nil
}
}
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chtimes(name, atime, mtime)
}
return u.base.Chtimes(name, atime, mtime)
}
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chmod(name, mode)
}
return u.base.Chmod(name, mode)
}
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
return u.base.Stat(name)
}
func (u *SizeCacheFS) Rename(oldname, newname string) error {
exists, err := Exists(u.cache, oldname)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
info := u.getCacheFile(oldname)
u.removeFromCache(oldname)
info.Path = newname
if err := u.addToCache(info); err != nil {
return err
}
if err := u.cache.Rename(oldname, newname); err != nil {
return err
}
}
return u.base.Rename(oldname, newname)
}
func (u *SizeCacheFS) Remove(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return fmt.Errorf("error determining if file exists: %v", err)
}
// If cache file exists, update to ensure consistency
if exists {
if err := u.cache.Remove(name); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.removeFromCache(name)
}
return u.base.Remove(name)
}
func (u *SizeCacheFS) RemoveAll(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return u.Remove(path)
} else {
return nil
}
})
if err != nil {
return err
}
// Remove the dirs
_ = u.cache.RemoveAll(name)
}
return u.base.RemoveAll(name)
}
func (u *SizeCacheFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) |
// Open opens name read-only, serving from the cache when a cached copy is
// present and fresh, and (re)populating the cache from the base otherwise.
// Directories are served straight from the base layer.
func (u *SizeCacheFS) Open(name string) (File, error) {
	// Very important, remove from cache to prevent eviction while opening
	info := u.getCacheFile(name)
	if info != nil {
		u.removeFromCache(name)
	}
	st, fi, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}
	switch st {
	case cacheLocal, cacheHit:
		// Cached copy is usable as-is.
	case cacheMiss:
		bfi, err := u.base.Stat(name)
		if err != nil {
			return nil, err
		}
		if !bfi.IsDir() {
			info, err = u.copyToCache(name)
			if err != nil {
				return nil, err
			}
		} else {
			// Directories are never cached; open directly on the base.
			return u.base.Open(name)
		}
	case cacheStale:
		if !fi.IsDir() {
			info, err = u.copyToCache(name)
			if err != nil {
				return nil, err
			}
		} else {
			return u.base.Open(name)
		}
	}
	// the dirs from cacheHit, cacheStale fall down here:
	bfile, _ := u.base.Open(name)
	lfile, err := u.cache.Open(name)
	if err != nil && bfile == nil {
		return nil, err
	}
	// NOTE(review): if the cache open failed but the base open succeeded,
	// this Stat will likely fail too and the whole Open errors out even
	// though a base handle exists — confirm the intended fallback behavior.
	fi, err = u.cache.Stat(name)
	if err != nil {
		return nil, err
	}
	uf := NewSizeCacheFile(bfile, lfile, os.O_RDONLY, u, info)
	return uf, nil
}
func (u *SizeCacheFS) Mkdir(name string, perm os.FileMode) error {
err := u.base.Mkdir(name, perm)
if err != nil {
return err
}
return u.cache.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
func (u *SizeCacheFS) Name() string {
return "SizeCacheFS"
}
func (u *SizeCacheFS) MkdirAll(name string, perm os.FileMode) error {
err := u.base.MkdirAll(name, perm)
if err != nil {
return err
}
return u.cache.MkdirAll(name, perm)
}
func (u *SizeCacheFS) Create(name string) (File, error) {
bfile, err := u.base.Create(name)
if err != nil {
return nil, err
}
lfile, err := u.cache.Create(name)
if err != nil {
// oops, see comment about OS_TRUNC above, should we remove? then we have to
// remember if the file did not exist before
_ = bfile.Close()
return nil, err
}
info := &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
// Ensure file is out
u.removeFromCache(name)
uf := NewSizeCacheFile(bfile, lfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, u, info)
return uf, nil
}
func (u *SizeCacheFS) Size() int64 {
return u.currSize
}
func (u *SizeCacheFS) Close() error {
// TODO close all open files
// Save index
var files []*cacheFile
nodes := u.files.GetByScoreRange(math.MinInt64, math.MaxInt64, nil)
for _, n := range nodes {
f := n.Value.(*cacheFile)
files = append(files, f)
}
data, err := json.Marshal(files)
if err != nil {
return fmt.Errorf("error marshalling files: %v", err)
}
if err := WriteFile(u.cache, ".cacheindex", data, 0644); err != nil {
return fmt.Errorf("error writing cache index: %v", err)
}
return nil
}
| {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
exists, err := Exists(u.base, name)
if err != nil {
return nil, fmt.Errorf("error determining if base file exists: %v", err)
}
if exists {
var err error
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
// It is not a dir, we cannot open a non existing dir
info = &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
}
}
var cacheFlag = flag
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
// Force read write mode
cacheFlag = (flag & (^os.O_WRONLY)) | os.O_RDWR
}
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.cache.OpenFile(name, cacheFlag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
uf := NewSizeCacheFile(bfi, lfi, flag, u, info)
return uf, nil
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.