file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name`, fuzzy form of a package
// identifier to ensure that update strategies will work if desired
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b") | .multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
fn valid_natural_number(val: String) -> result::Result<(), String> {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
} | random_line_split | |
main.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name`, fuzzy form of a package
// identifier to ensure that update strategies will work if desired
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b")
.multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
fn valid_natural_number(val: String) -> result::Result<(), String> | {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
} | identifier_body | |
config.go | //
// (C) Copyright 2018-2019 Intel Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
// The Government's rights to use, modify, reproduce, release, perform, display,
// or disclose this software are subject to the terms of the Apache License as
// provided in Contract No. 8F-30005.
// Any reproduction of computer software, computer software documentation, or
// portions thereof marked with this legend must also reproduce the markings.
//
package server
import (
"hash/fnv"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/log"
)
const (
configOut = ".daos_server.active.yml"
relConfExamplesPath = "utils/config/examples/"
msgBadConfig = "insufficient config file, see examples in "
msgConfigNoProvider = "provider not specified in config"
msgConfigNoPath = "no config path set"
msgConfigNoServers = "no servers specified in config"
msgConfigServerNoIface = "fabric interface not specified in config"
)
func (c *configuration) loadConfig() error {
if c.Path == "" {
return errors.New(msgConfigNoPath)
}
bytes, err := ioutil.ReadFile(c.Path)
if err != nil {
return errors.WithMessage(err, "reading file")
}
if err = c.parse(bytes); err != nil {
return errors.WithMessage(err, "parse failed; config contains invalid "+
"parameters and may be out of date, see server config examples")
}
return nil
}
func (c *configuration) saveConfig(filename string) error {
bytes, err := yaml.Marshal(c)
if err != nil {
return err
}
return ioutil.WriteFile(filename, bytes, 0644)
}
func (c *configuration) setPath(path string) error {
if path != "" {
c.Path = path
}
if !filepath.IsAbs(c.Path) {
newPath, err := c.ext.getAbsInstallPath(c.Path)
if err != nil {
return err
}
c.Path = newPath
}
return nil
}
// loadConfigOpts derives file location and parses configuration options
// from both config file and commandline flags.
func loadConfigOpts(cliOpts *cliOptions, host string) (
config configuration, err error) {
config = newConfiguration()
if err := config.setPath(cliOpts.ConfigPath); err != nil {
return config, errors.WithMessage(err, "set path")
}
if err := config.loadConfig(); err != nil {
return config, errors.WithMessagef(err, "loading %s", config.Path)
}
log.Debugf("DAOS config read from %s", config.Path)
// Override certificate support if specified in cliOpts
if cliOpts.Insecure {
config.TransportConfig.AllowInsecure = true
}
// get unique identifier to activate SPDK multiprocess mode
config.NvmeShmID = hash(host + strconv.Itoa(os.Getpid()))
if err = config.getIOParams(cliOpts); err != nil {
return config, errors.Wrap(
err, "failed to retrieve I/O service params")
}
if len(config.Servers) == 0 {
return config, errors.New("missing I/O service params")
}
for idx := range config.Servers {
config.Servers[idx].Hostname = host
}
return config, nil
}
// saveActiveConfig saves read-only active config, tries config dir then /tmp/
func saveActiveConfig(config *configuration) {
activeConfig := filepath.Join(filepath.Dir(config.Path), configOut)
eMsg := "Warning: active config could not be saved (%s)"
err := config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
activeConfig = filepath.Join("/tmp", configOut)
err = config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
}
}
if err == nil {
log.Debugf("Active config saved to %s (read-only)", activeConfig)
}
}
// hash produces unique int from string, mask MSB on conversion to signed int
func hash(s string) int {
h := fnv.New32a()
if _, err := h.Write([]byte(s)); err != nil {
panic(err) // should never happen
}
return int(h.Sum32() & 0x7FFFFFFF) // mask MSB of uint32 as this will be sign bit
}
// populateCliOpts populates options string slice for single I/O service
func (c *configuration) | (i int) error {
// avoid mutating subject during iteration, instead access through
// config/parent object
srv := &c.Servers[i]
srv.CliOpts = append(
srv.CliOpts,
"-t", strconv.Itoa(srv.Targets),
"-g", c.SystemName,
"-s", srv.ScmMount)
if c.Modules != "" {
srv.CliOpts = append(srv.CliOpts, "-m", c.Modules)
}
if c.Attach != "" {
srv.CliOpts = append(srv.CliOpts, "-a", c.Attach)
}
if srv.NrXsHelpers > 2 {
log.Errorf(
"invalid NrXsHelpers %d exceed [0, 2], "+
"using default value of 2", srv.NrXsHelpers)
srv.NrXsHelpers = 2
} else if srv.NrXsHelpers != 2 {
srv.CliOpts = append(
srv.CliOpts, "-x", strconv.Itoa(srv.NrXsHelpers))
}
if srv.FirstCore > 0 {
srv.CliOpts = append(
srv.CliOpts, "-f", strconv.Itoa(srv.FirstCore))
}
if c.SystemMap != "" {
srv.CliOpts = append(srv.CliOpts, "-y", c.SystemMap)
}
if srv.Rank != nil {
srv.CliOpts = append(
srv.CliOpts, "-r", srv.Rank.String())
}
if c.SocketDir != "" {
srv.CliOpts = append(srv.CliOpts, "-d", c.SocketDir)
}
if c.NvmeShmID > 0 {
// Add shm_id so I/O service can share spdk access to controllers
// with mgmtControlServer process. Currently not user
// configurable when starting daos_server, use default.
srv.CliOpts = append(
srv.CliOpts, "-i", strconv.Itoa(c.NvmeShmID))
}
return nil
}
// cmdlineOverride mutates configuration options based on commandline
// options overriding those loaded from configuration file.
//
// Current cli opts for daos_server also specified in config:
// port, mount path, targets, group, rank, socket dir
// Current cli opts to be passed to be stored by daos_server:
// modules, attach, map
func (c *configuration) cmdlineOverride(opts *cliOptions) {
// Populate options that can be provided on both the commandline and config.
if opts.Port > 0 {
c.Port = int(opts.Port)
}
if opts.Rank != nil {
// global rank parameter should only apply to first I/O service
c.Servers[0].Rank = opts.Rank
}
if opts.Insecure {
c.TransportConfig.AllowInsecure = true
}
// override each per-server config
for i := range c.Servers {
srv := &c.Servers[i]
if opts.MountPath != "" {
// override each per-server config in addition to global value
c.ScmMountPath = opts.MountPath
srv.ScmMount = opts.MountPath
} else if srv.ScmMount == "" {
// if scm not specified for server, apply global
srv.ScmMount = c.ScmMountPath
}
if opts.Cores > 0 {
log.Debugf("-c option deprecated, please use -t instead")
srv.Targets = int(opts.Cores)
}
// Targets should override Cores if specified in cmdline or
// config file.
if opts.Targets > 0 {
srv.Targets = int(opts.Targets)
}
if opts.NrXsHelpers != nil {
srv.NrXsHelpers = int(*opts.NrXsHelpers)
}
if opts.FirstCore > 0 {
srv.FirstCore = int(opts.FirstCore)
}
}
if opts.Group != "" {
c.SystemName = opts.Group
}
if opts.SocketDir != "" {
c.SocketDir = opts.SocketDir
}
if opts.Modules != nil {
c.Modules = *opts.Modules
}
if opts.Attach != nil {
c.Attach = *opts.Attach
}
if opts.Map != nil {
c.SystemMap = *opts.Map
}
}
// validateConfig asserts that config meets minimum requirements
func (c *configuration) validateConfig() error {
if c.Provider == "" {
return errors.New(msgConfigNoProvider)
}
if len(c.Servers) == 0 {
return errors.New(msgConfigNoServers)
}
for i, srv := range c.Servers {
if srv.FabricIface == "" {
return errors.Errorf(
msgConfigServerNoIface+" for I/O service %d", i)
}
}
return nil
}
// getIOParams builds commandline options and environment variables to provide
// to forked I/O service
func (c *configuration) getIOParams(cliOpts *cliOptions) error {
if err := c.validateConfig(); err != nil {
examplesPath, _ := c.ext.getAbsInstallPath(relConfExamplesPath)
return errors.WithMessagef(err, msgBadConfig+examplesPath)
}
// override config with commandline supplied options
c.cmdlineOverride(cliOpts)
for i := range c.Servers {
srv := &c.Servers[i]
if err := c.populateCliOpts(i); err != nil {
return errors.WithMessagef(
err,
"populating I/O service options")
}
// add to existing config file EnvVars
srv.EnvVars = append(
srv.EnvVars,
"CRT_PHY_ADDR_STR="+c.Provider,
"OFI_INTERFACE="+srv.FabricIface,
"D_LOG_MASK="+srv.LogMask,
"D_LOG_FILE="+srv.LogFile)
// populate only if non-zero
if srv.FabricIfacePort != 0 {
srv.EnvVars = append(
srv.EnvVars,
"OFI_PORT="+strconv.Itoa(srv.FabricIfacePort))
}
}
return nil
}
// populateEnv adds envs from config options to existing envs from user's shell
// overwriting any existing values for given key
func (c *configuration) populateEnv(i int, envs *[]string) {
for _, newEnv := range c.Servers[i].EnvVars {
key := strings.Split(newEnv, "=")[0]
// filter out any matching keys in envs then adds new value
*envs = common.Filter(
*envs,
func(s string) bool {
return key != strings.Split(s, "=")[0]
})
*envs = append(*envs, newEnv)
}
}
func (c *configuration) setLogging(name string) (*os.File, error) {
// Set log level mask for default logger from config.
switch c.ControlLogMask {
case cLogDebug:
log.Debugf("Switching control log level to DEBUG")
log.SetLevel(log.Debug)
case cLogError:
log.Debugf("Switching control log level to ERROR")
log.SetLevel(log.Error)
}
// Set log file for default logger if specified in config.
if c.ControlLogFile != "" {
f, err := common.AppendFile(c.ControlLogFile)
if err != nil {
return nil, errors.WithMessage(
err, "create log file")
}
log.Debugf(
"%s logging to file %s",
os.Args[0], c.ControlLogFile)
log.SetOutput(f)
return f, nil
}
// if no logfile specified, output from multiple hosts
// may get aggregated, prefix entries with hostname
log.NewDefaultLogger(log.Debug, name+" ", os.Stderr)
return nil, nil
}
| populateCliOpts | identifier_name |
config.go | //
// (C) Copyright 2018-2019 Intel Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
// The Government's rights to use, modify, reproduce, release, perform, display,
// or disclose this software are subject to the terms of the Apache License as
// provided in Contract No. 8F-30005.
// Any reproduction of computer software, computer software documentation, or
// portions thereof marked with this legend must also reproduce the markings.
//
package server
import (
"hash/fnv"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/log"
)
const (
configOut = ".daos_server.active.yml"
relConfExamplesPath = "utils/config/examples/"
msgBadConfig = "insufficient config file, see examples in "
msgConfigNoProvider = "provider not specified in config"
msgConfigNoPath = "no config path set"
msgConfigNoServers = "no servers specified in config"
msgConfigServerNoIface = "fabric interface not specified in config"
)
func (c *configuration) loadConfig() error {
if c.Path == "" {
return errors.New(msgConfigNoPath)
}
bytes, err := ioutil.ReadFile(c.Path)
if err != nil {
return errors.WithMessage(err, "reading file")
}
if err = c.parse(bytes); err != nil {
return errors.WithMessage(err, "parse failed; config contains invalid "+
"parameters and may be out of date, see server config examples")
}
return nil
}
func (c *configuration) saveConfig(filename string) error {
bytes, err := yaml.Marshal(c)
if err != nil {
return err
}
return ioutil.WriteFile(filename, bytes, 0644)
}
func (c *configuration) setPath(path string) error {
if path != "" {
c.Path = path
}
if !filepath.IsAbs(c.Path) {
newPath, err := c.ext.getAbsInstallPath(c.Path)
if err != nil {
return err
}
c.Path = newPath
}
return nil
}
// loadConfigOpts derives file location and parses configuration options
// from both config file and commandline flags.
func loadConfigOpts(cliOpts *cliOptions, host string) (
config configuration, err error) {
config = newConfiguration()
if err := config.setPath(cliOpts.ConfigPath); err != nil {
return config, errors.WithMessage(err, "set path")
}
if err := config.loadConfig(); err != nil {
return config, errors.WithMessagef(err, "loading %s", config.Path)
}
log.Debugf("DAOS config read from %s", config.Path)
// Override certificate support if specified in cliOpts
if cliOpts.Insecure {
config.TransportConfig.AllowInsecure = true
}
// get unique identifier to activate SPDK multiprocess mode
config.NvmeShmID = hash(host + strconv.Itoa(os.Getpid()))
if err = config.getIOParams(cliOpts); err != nil {
return config, errors.Wrap(
err, "failed to retrieve I/O service params")
}
if len(config.Servers) == 0 {
return config, errors.New("missing I/O service params")
}
for idx := range config.Servers {
config.Servers[idx].Hostname = host
}
return config, nil
}
// saveActiveConfig saves read-only active config, tries config dir then /tmp/
func saveActiveConfig(config *configuration) {
activeConfig := filepath.Join(filepath.Dir(config.Path), configOut)
eMsg := "Warning: active config could not be saved (%s)"
err := config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
activeConfig = filepath.Join("/tmp", configOut)
err = config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
}
}
if err == nil {
log.Debugf("Active config saved to %s (read-only)", activeConfig)
}
}
// hash produces unique int from string, mask MSB on conversion to signed int
func hash(s string) int {
h := fnv.New32a()
if _, err := h.Write([]byte(s)); err != nil {
panic(err) // should never happen
}
return int(h.Sum32() & 0x7FFFFFFF) // mask MSB of uint32 as this will be sign bit
}
// populateCliOpts populates options string slice for single I/O service
func (c *configuration) populateCliOpts(i int) error {
// avoid mutating subject during iteration, instead access through
// config/parent object
srv := &c.Servers[i]
srv.CliOpts = append(
srv.CliOpts,
"-t", strconv.Itoa(srv.Targets),
"-g", c.SystemName,
"-s", srv.ScmMount)
if c.Modules != "" {
srv.CliOpts = append(srv.CliOpts, "-m", c.Modules)
}
if c.Attach != "" {
srv.CliOpts = append(srv.CliOpts, "-a", c.Attach)
}
if srv.NrXsHelpers > 2 {
log.Errorf(
"invalid NrXsHelpers %d exceed [0, 2], "+
"using default value of 2", srv.NrXsHelpers)
srv.NrXsHelpers = 2
} else if srv.NrXsHelpers != 2 {
srv.CliOpts = append(
srv.CliOpts, "-x", strconv.Itoa(srv.NrXsHelpers))
}
if srv.FirstCore > 0 {
srv.CliOpts = append(
srv.CliOpts, "-f", strconv.Itoa(srv.FirstCore))
}
if c.SystemMap != "" {
srv.CliOpts = append(srv.CliOpts, "-y", c.SystemMap)
}
if srv.Rank != nil {
srv.CliOpts = append(
srv.CliOpts, "-r", srv.Rank.String())
}
if c.SocketDir != "" {
srv.CliOpts = append(srv.CliOpts, "-d", c.SocketDir)
}
if c.NvmeShmID > 0 {
// Add shm_id so I/O service can share spdk access to controllers
// with mgmtControlServer process. Currently not user
// configurable when starting daos_server, use default.
srv.CliOpts = append(
srv.CliOpts, "-i", strconv.Itoa(c.NvmeShmID))
}
return nil
}
// cmdlineOverride mutates configuration options based on commandline
// options overriding those loaded from configuration file.
//
// Current cli opts for daos_server also specified in config:
// port, mount path, targets, group, rank, socket dir
// Current cli opts to be passed to be stored by daos_server:
// modules, attach, map
func (c *configuration) cmdlineOverride(opts *cliOptions) {
// Populate options that can be provided on both the commandline and config.
if opts.Port > 0 {
c.Port = int(opts.Port)
}
if opts.Rank != nil {
// global rank parameter should only apply to first I/O service
c.Servers[0].Rank = opts.Rank
}
if opts.Insecure {
c.TransportConfig.AllowInsecure = true
}
// override each per-server config
for i := range c.Servers {
srv := &c.Servers[i]
if opts.MountPath != "" {
// override each per-server config in addition to global value
c.ScmMountPath = opts.MountPath
srv.ScmMount = opts.MountPath
} else if srv.ScmMount == "" {
// if scm not specified for server, apply global
srv.ScmMount = c.ScmMountPath
}
if opts.Cores > 0 {
log.Debugf("-c option deprecated, please use -t instead")
srv.Targets = int(opts.Cores)
}
// Targets should override Cores if specified in cmdline or
// config file.
if opts.Targets > 0 {
srv.Targets = int(opts.Targets)
}
if opts.NrXsHelpers != nil {
srv.NrXsHelpers = int(*opts.NrXsHelpers)
}
if opts.FirstCore > 0 {
srv.FirstCore = int(opts.FirstCore)
}
}
if opts.Group != "" {
c.SystemName = opts.Group
}
if opts.SocketDir != "" {
c.SocketDir = opts.SocketDir
}
if opts.Modules != nil {
c.Modules = *opts.Modules
}
if opts.Attach != nil {
c.Attach = *opts.Attach
}
if opts.Map != nil {
c.SystemMap = *opts.Map
}
}
// validateConfig asserts that config meets minimum requirements
func (c *configuration) validateConfig() error {
if c.Provider == "" {
return errors.New(msgConfigNoProvider)
}
if len(c.Servers) == 0 {
return errors.New(msgConfigNoServers)
}
for i, srv := range c.Servers {
if srv.FabricIface == "" {
return errors.Errorf(
msgConfigServerNoIface+" for I/O service %d", i)
}
}
return nil
}
// getIOParams builds commandline options and environment variables to provide
// to forked I/O service
func (c *configuration) getIOParams(cliOpts *cliOptions) error {
if err := c.validateConfig(); err != nil {
examplesPath, _ := c.ext.getAbsInstallPath(relConfExamplesPath)
return errors.WithMessagef(err, msgBadConfig+examplesPath) |
// override config with commandline supplied options
c.cmdlineOverride(cliOpts)
for i := range c.Servers {
srv := &c.Servers[i]
if err := c.populateCliOpts(i); err != nil {
return errors.WithMessagef(
err,
"populating I/O service options")
}
// add to existing config file EnvVars
srv.EnvVars = append(
srv.EnvVars,
"CRT_PHY_ADDR_STR="+c.Provider,
"OFI_INTERFACE="+srv.FabricIface,
"D_LOG_MASK="+srv.LogMask,
"D_LOG_FILE="+srv.LogFile)
// populate only if non-zero
if srv.FabricIfacePort != 0 {
srv.EnvVars = append(
srv.EnvVars,
"OFI_PORT="+strconv.Itoa(srv.FabricIfacePort))
}
}
return nil
}
// populateEnv adds envs from config options to existing envs from user's shell
// overwriting any existing values for given key
func (c *configuration) populateEnv(i int, envs *[]string) {
for _, newEnv := range c.Servers[i].EnvVars {
key := strings.Split(newEnv, "=")[0]
// filter out any matching keys in envs then adds new value
*envs = common.Filter(
*envs,
func(s string) bool {
return key != strings.Split(s, "=")[0]
})
*envs = append(*envs, newEnv)
}
}
func (c *configuration) setLogging(name string) (*os.File, error) {
// Set log level mask for default logger from config.
switch c.ControlLogMask {
case cLogDebug:
log.Debugf("Switching control log level to DEBUG")
log.SetLevel(log.Debug)
case cLogError:
log.Debugf("Switching control log level to ERROR")
log.SetLevel(log.Error)
}
// Set log file for default logger if specified in config.
if c.ControlLogFile != "" {
f, err := common.AppendFile(c.ControlLogFile)
if err != nil {
return nil, errors.WithMessage(
err, "create log file")
}
log.Debugf(
"%s logging to file %s",
os.Args[0], c.ControlLogFile)
log.SetOutput(f)
return f, nil
}
// if no logfile specified, output from multiple hosts
// may get aggregated, prefix entries with hostname
log.NewDefaultLogger(log.Debug, name+" ", os.Stderr)
return nil, nil
} | } | random_line_split |
config.go | //
// (C) Copyright 2018-2019 Intel Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
// The Government's rights to use, modify, reproduce, release, perform, display,
// or disclose this software are subject to the terms of the Apache License as
// provided in Contract No. 8F-30005.
// Any reproduction of computer software, computer software documentation, or
// portions thereof marked with this legend must also reproduce the markings.
//
package server
import (
"hash/fnv"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/log"
)
const (
configOut = ".daos_server.active.yml"
relConfExamplesPath = "utils/config/examples/"
msgBadConfig = "insufficient config file, see examples in "
msgConfigNoProvider = "provider not specified in config"
msgConfigNoPath = "no config path set"
msgConfigNoServers = "no servers specified in config"
msgConfigServerNoIface = "fabric interface not specified in config"
)
func (c *configuration) loadConfig() error {
if c.Path == "" {
return errors.New(msgConfigNoPath)
}
bytes, err := ioutil.ReadFile(c.Path)
if err != nil {
return errors.WithMessage(err, "reading file")
}
if err = c.parse(bytes); err != nil {
return errors.WithMessage(err, "parse failed; config contains invalid "+
"parameters and may be out of date, see server config examples")
}
return nil
}
func (c *configuration) saveConfig(filename string) error |
func (c *configuration) setPath(path string) error {
if path != "" {
c.Path = path
}
if !filepath.IsAbs(c.Path) {
newPath, err := c.ext.getAbsInstallPath(c.Path)
if err != nil {
return err
}
c.Path = newPath
}
return nil
}
// loadConfigOpts derives file location and parses configuration options
// from both config file and commandline flags.
func loadConfigOpts(cliOpts *cliOptions, host string) (
config configuration, err error) {
config = newConfiguration()
if err := config.setPath(cliOpts.ConfigPath); err != nil {
return config, errors.WithMessage(err, "set path")
}
if err := config.loadConfig(); err != nil {
return config, errors.WithMessagef(err, "loading %s", config.Path)
}
log.Debugf("DAOS config read from %s", config.Path)
// Override certificate support if specified in cliOpts
if cliOpts.Insecure {
config.TransportConfig.AllowInsecure = true
}
// get unique identifier to activate SPDK multiprocess mode
config.NvmeShmID = hash(host + strconv.Itoa(os.Getpid()))
if err = config.getIOParams(cliOpts); err != nil {
return config, errors.Wrap(
err, "failed to retrieve I/O service params")
}
if len(config.Servers) == 0 {
return config, errors.New("missing I/O service params")
}
for idx := range config.Servers {
config.Servers[idx].Hostname = host
}
return config, nil
}
// saveActiveConfig saves read-only active config, tries config dir then /tmp/
func saveActiveConfig(config *configuration) {
activeConfig := filepath.Join(filepath.Dir(config.Path), configOut)
eMsg := "Warning: active config could not be saved (%s)"
err := config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
activeConfig = filepath.Join("/tmp", configOut)
err = config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
}
}
if err == nil {
log.Debugf("Active config saved to %s (read-only)", activeConfig)
}
}
// hash produces unique int from string, mask MSB on conversion to signed int
func hash(s string) int {
h := fnv.New32a()
if _, err := h.Write([]byte(s)); err != nil {
panic(err) // should never happen
}
return int(h.Sum32() & 0x7FFFFFFF) // mask MSB of uint32 as this will be sign bit
}
// populateCliOpts populates options string slice for single I/O service
func (c *configuration) populateCliOpts(i int) error {
// avoid mutating subject during iteration, instead access through
// config/parent object
srv := &c.Servers[i]
srv.CliOpts = append(
srv.CliOpts,
"-t", strconv.Itoa(srv.Targets),
"-g", c.SystemName,
"-s", srv.ScmMount)
if c.Modules != "" {
srv.CliOpts = append(srv.CliOpts, "-m", c.Modules)
}
if c.Attach != "" {
srv.CliOpts = append(srv.CliOpts, "-a", c.Attach)
}
if srv.NrXsHelpers > 2 {
log.Errorf(
"invalid NrXsHelpers %d exceed [0, 2], "+
"using default value of 2", srv.NrXsHelpers)
srv.NrXsHelpers = 2
} else if srv.NrXsHelpers != 2 {
srv.CliOpts = append(
srv.CliOpts, "-x", strconv.Itoa(srv.NrXsHelpers))
}
if srv.FirstCore > 0 {
srv.CliOpts = append(
srv.CliOpts, "-f", strconv.Itoa(srv.FirstCore))
}
if c.SystemMap != "" {
srv.CliOpts = append(srv.CliOpts, "-y", c.SystemMap)
}
if srv.Rank != nil {
srv.CliOpts = append(
srv.CliOpts, "-r", srv.Rank.String())
}
if c.SocketDir != "" {
srv.CliOpts = append(srv.CliOpts, "-d", c.SocketDir)
}
if c.NvmeShmID > 0 {
// Add shm_id so I/O service can share spdk access to controllers
// with mgmtControlServer process. Currently not user
// configurable when starting daos_server, use default.
srv.CliOpts = append(
srv.CliOpts, "-i", strconv.Itoa(c.NvmeShmID))
}
return nil
}
// cmdlineOverride mutates configuration options based on commandline
// options overriding those loaded from configuration file.
//
// Current cli opts for daos_server also specified in config:
// port, mount path, targets, group, rank, socket dir
// Current cli opts to be passed to be stored by daos_server:
// modules, attach, map
func (c *configuration) cmdlineOverride(opts *cliOptions) {
// Populate options that can be provided on both the commandline and config.
if opts.Port > 0 {
c.Port = int(opts.Port)
}
if opts.Rank != nil {
// global rank parameter should only apply to first I/O service
c.Servers[0].Rank = opts.Rank
}
if opts.Insecure {
c.TransportConfig.AllowInsecure = true
}
// override each per-server config
for i := range c.Servers {
srv := &c.Servers[i]
if opts.MountPath != "" {
// override each per-server config in addition to global value
c.ScmMountPath = opts.MountPath
srv.ScmMount = opts.MountPath
} else if srv.ScmMount == "" {
// if scm not specified for server, apply global
srv.ScmMount = c.ScmMountPath
}
if opts.Cores > 0 {
log.Debugf("-c option deprecated, please use -t instead")
srv.Targets = int(opts.Cores)
}
// Targets should override Cores if specified in cmdline or
// config file.
if opts.Targets > 0 {
srv.Targets = int(opts.Targets)
}
if opts.NrXsHelpers != nil {
srv.NrXsHelpers = int(*opts.NrXsHelpers)
}
if opts.FirstCore > 0 {
srv.FirstCore = int(opts.FirstCore)
}
}
if opts.Group != "" {
c.SystemName = opts.Group
}
if opts.SocketDir != "" {
c.SocketDir = opts.SocketDir
}
if opts.Modules != nil {
c.Modules = *opts.Modules
}
if opts.Attach != nil {
c.Attach = *opts.Attach
}
if opts.Map != nil {
c.SystemMap = *opts.Map
}
}
// validateConfig asserts that config meets minimum requirements
func (c *configuration) validateConfig() error {
if c.Provider == "" {
return errors.New(msgConfigNoProvider)
}
if len(c.Servers) == 0 {
return errors.New(msgConfigNoServers)
}
for i, srv := range c.Servers {
if srv.FabricIface == "" {
return errors.Errorf(
msgConfigServerNoIface+" for I/O service %d", i)
}
}
return nil
}
// getIOParams builds commandline options and environment variables to provide
// to forked I/O service
func (c *configuration) getIOParams(cliOpts *cliOptions) error {
if err := c.validateConfig(); err != nil {
examplesPath, _ := c.ext.getAbsInstallPath(relConfExamplesPath)
return errors.WithMessagef(err, msgBadConfig+examplesPath)
}
// override config with commandline supplied options
c.cmdlineOverride(cliOpts)
for i := range c.Servers {
srv := &c.Servers[i]
if err := c.populateCliOpts(i); err != nil {
return errors.WithMessagef(
err,
"populating I/O service options")
}
// add to existing config file EnvVars
srv.EnvVars = append(
srv.EnvVars,
"CRT_PHY_ADDR_STR="+c.Provider,
"OFI_INTERFACE="+srv.FabricIface,
"D_LOG_MASK="+srv.LogMask,
"D_LOG_FILE="+srv.LogFile)
// populate only if non-zero
if srv.FabricIfacePort != 0 {
srv.EnvVars = append(
srv.EnvVars,
"OFI_PORT="+strconv.Itoa(srv.FabricIfacePort))
}
}
return nil
}
// populateEnv adds envs from config options to existing envs from user's shell
// overwriting any existing values for given key
func (c *configuration) populateEnv(i int, envs *[]string) {
for _, newEnv := range c.Servers[i].EnvVars {
key := strings.Split(newEnv, "=")[0]
// filter out any matching keys in envs then adds new value
*envs = common.Filter(
*envs,
func(s string) bool {
return key != strings.Split(s, "=")[0]
})
*envs = append(*envs, newEnv)
}
}
func (c *configuration) setLogging(name string) (*os.File, error) {
// Set log level mask for default logger from config.
switch c.ControlLogMask {
case cLogDebug:
log.Debugf("Switching control log level to DEBUG")
log.SetLevel(log.Debug)
case cLogError:
log.Debugf("Switching control log level to ERROR")
log.SetLevel(log.Error)
}
// Set log file for default logger if specified in config.
if c.ControlLogFile != "" {
f, err := common.AppendFile(c.ControlLogFile)
if err != nil {
return nil, errors.WithMessage(
err, "create log file")
}
log.Debugf(
"%s logging to file %s",
os.Args[0], c.ControlLogFile)
log.SetOutput(f)
return f, nil
}
// if no logfile specified, output from multiple hosts
// may get aggregated, prefix entries with hostname
log.NewDefaultLogger(log.Debug, name+" ", os.Stderr)
return nil, nil
}
| {
bytes, err := yaml.Marshal(c)
if err != nil {
return err
}
return ioutil.WriteFile(filename, bytes, 0644)
} | identifier_body |
config.go | //
// (C) Copyright 2018-2019 Intel Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
// The Government's rights to use, modify, reproduce, release, perform, display,
// or disclose this software are subject to the terms of the Apache License as
// provided in Contract No. 8F-30005.
// Any reproduction of computer software, computer software documentation, or
// portions thereof marked with this legend must also reproduce the markings.
//
package server
import (
"hash/fnv"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/log"
)
const (
configOut = ".daos_server.active.yml"
relConfExamplesPath = "utils/config/examples/"
msgBadConfig = "insufficient config file, see examples in "
msgConfigNoProvider = "provider not specified in config"
msgConfigNoPath = "no config path set"
msgConfigNoServers = "no servers specified in config"
msgConfigServerNoIface = "fabric interface not specified in config"
)
func (c *configuration) loadConfig() error {
if c.Path == "" {
return errors.New(msgConfigNoPath)
}
bytes, err := ioutil.ReadFile(c.Path)
if err != nil {
return errors.WithMessage(err, "reading file")
}
if err = c.parse(bytes); err != nil {
return errors.WithMessage(err, "parse failed; config contains invalid "+
"parameters and may be out of date, see server config examples")
}
return nil
}
func (c *configuration) saveConfig(filename string) error {
bytes, err := yaml.Marshal(c)
if err != nil {
return err
}
return ioutil.WriteFile(filename, bytes, 0644)
}
func (c *configuration) setPath(path string) error {
if path != "" {
c.Path = path
}
if !filepath.IsAbs(c.Path) {
newPath, err := c.ext.getAbsInstallPath(c.Path)
if err != nil {
return err
}
c.Path = newPath
}
return nil
}
// loadConfigOpts derives file location and parses configuration options
// from both config file and commandline flags.
func loadConfigOpts(cliOpts *cliOptions, host string) (
config configuration, err error) {
config = newConfiguration()
if err := config.setPath(cliOpts.ConfigPath); err != nil |
if err := config.loadConfig(); err != nil {
return config, errors.WithMessagef(err, "loading %s", config.Path)
}
log.Debugf("DAOS config read from %s", config.Path)
// Override certificate support if specified in cliOpts
if cliOpts.Insecure {
config.TransportConfig.AllowInsecure = true
}
// get unique identifier to activate SPDK multiprocess mode
config.NvmeShmID = hash(host + strconv.Itoa(os.Getpid()))
if err = config.getIOParams(cliOpts); err != nil {
return config, errors.Wrap(
err, "failed to retrieve I/O service params")
}
if len(config.Servers) == 0 {
return config, errors.New("missing I/O service params")
}
for idx := range config.Servers {
config.Servers[idx].Hostname = host
}
return config, nil
}
// saveActiveConfig saves read-only active config, tries config dir then /tmp/
func saveActiveConfig(config *configuration) {
activeConfig := filepath.Join(filepath.Dir(config.Path), configOut)
eMsg := "Warning: active config could not be saved (%s)"
err := config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
activeConfig = filepath.Join("/tmp", configOut)
err = config.saveConfig(activeConfig)
if err != nil {
log.Debugf(eMsg, err)
}
}
if err == nil {
log.Debugf("Active config saved to %s (read-only)", activeConfig)
}
}
// hash produces unique int from string, mask MSB on conversion to signed int
func hash(s string) int {
h := fnv.New32a()
if _, err := h.Write([]byte(s)); err != nil {
panic(err) // should never happen
}
return int(h.Sum32() & 0x7FFFFFFF) // mask MSB of uint32 as this will be sign bit
}
// populateCliOpts populates options string slice for single I/O service
func (c *configuration) populateCliOpts(i int) error {
// avoid mutating subject during iteration, instead access through
// config/parent object
srv := &c.Servers[i]
srv.CliOpts = append(
srv.CliOpts,
"-t", strconv.Itoa(srv.Targets),
"-g", c.SystemName,
"-s", srv.ScmMount)
if c.Modules != "" {
srv.CliOpts = append(srv.CliOpts, "-m", c.Modules)
}
if c.Attach != "" {
srv.CliOpts = append(srv.CliOpts, "-a", c.Attach)
}
if srv.NrXsHelpers > 2 {
log.Errorf(
"invalid NrXsHelpers %d exceed [0, 2], "+
"using default value of 2", srv.NrXsHelpers)
srv.NrXsHelpers = 2
} else if srv.NrXsHelpers != 2 {
srv.CliOpts = append(
srv.CliOpts, "-x", strconv.Itoa(srv.NrXsHelpers))
}
if srv.FirstCore > 0 {
srv.CliOpts = append(
srv.CliOpts, "-f", strconv.Itoa(srv.FirstCore))
}
if c.SystemMap != "" {
srv.CliOpts = append(srv.CliOpts, "-y", c.SystemMap)
}
if srv.Rank != nil {
srv.CliOpts = append(
srv.CliOpts, "-r", srv.Rank.String())
}
if c.SocketDir != "" {
srv.CliOpts = append(srv.CliOpts, "-d", c.SocketDir)
}
if c.NvmeShmID > 0 {
// Add shm_id so I/O service can share spdk access to controllers
// with mgmtControlServer process. Currently not user
// configurable when starting daos_server, use default.
srv.CliOpts = append(
srv.CliOpts, "-i", strconv.Itoa(c.NvmeShmID))
}
return nil
}
// cmdlineOverride mutates configuration options based on commandline
// options overriding those loaded from configuration file.
//
// Current cli opts for daos_server also specified in config:
// port, mount path, targets, group, rank, socket dir
// Current cli opts to be passed to be stored by daos_server:
// modules, attach, map
func (c *configuration) cmdlineOverride(opts *cliOptions) {
// Populate options that can be provided on both the commandline and config.
if opts.Port > 0 {
c.Port = int(opts.Port)
}
if opts.Rank != nil {
// global rank parameter should only apply to first I/O service
c.Servers[0].Rank = opts.Rank
}
if opts.Insecure {
c.TransportConfig.AllowInsecure = true
}
// override each per-server config
for i := range c.Servers {
srv := &c.Servers[i]
if opts.MountPath != "" {
// override each per-server config in addition to global value
c.ScmMountPath = opts.MountPath
srv.ScmMount = opts.MountPath
} else if srv.ScmMount == "" {
// if scm not specified for server, apply global
srv.ScmMount = c.ScmMountPath
}
if opts.Cores > 0 {
log.Debugf("-c option deprecated, please use -t instead")
srv.Targets = int(opts.Cores)
}
// Targets should override Cores if specified in cmdline or
// config file.
if opts.Targets > 0 {
srv.Targets = int(opts.Targets)
}
if opts.NrXsHelpers != nil {
srv.NrXsHelpers = int(*opts.NrXsHelpers)
}
if opts.FirstCore > 0 {
srv.FirstCore = int(opts.FirstCore)
}
}
if opts.Group != "" {
c.SystemName = opts.Group
}
if opts.SocketDir != "" {
c.SocketDir = opts.SocketDir
}
if opts.Modules != nil {
c.Modules = *opts.Modules
}
if opts.Attach != nil {
c.Attach = *opts.Attach
}
if opts.Map != nil {
c.SystemMap = *opts.Map
}
}
// validateConfig asserts that config meets minimum requirements
func (c *configuration) validateConfig() error {
if c.Provider == "" {
return errors.New(msgConfigNoProvider)
}
if len(c.Servers) == 0 {
return errors.New(msgConfigNoServers)
}
for i, srv := range c.Servers {
if srv.FabricIface == "" {
return errors.Errorf(
msgConfigServerNoIface+" for I/O service %d", i)
}
}
return nil
}
// getIOParams builds commandline options and environment variables to provide
// to forked I/O service
func (c *configuration) getIOParams(cliOpts *cliOptions) error {
if err := c.validateConfig(); err != nil {
examplesPath, _ := c.ext.getAbsInstallPath(relConfExamplesPath)
return errors.WithMessagef(err, msgBadConfig+examplesPath)
}
// override config with commandline supplied options
c.cmdlineOverride(cliOpts)
for i := range c.Servers {
srv := &c.Servers[i]
if err := c.populateCliOpts(i); err != nil {
return errors.WithMessagef(
err,
"populating I/O service options")
}
// add to existing config file EnvVars
srv.EnvVars = append(
srv.EnvVars,
"CRT_PHY_ADDR_STR="+c.Provider,
"OFI_INTERFACE="+srv.FabricIface,
"D_LOG_MASK="+srv.LogMask,
"D_LOG_FILE="+srv.LogFile)
// populate only if non-zero
if srv.FabricIfacePort != 0 {
srv.EnvVars = append(
srv.EnvVars,
"OFI_PORT="+strconv.Itoa(srv.FabricIfacePort))
}
}
return nil
}
// populateEnv adds envs from config options to existing envs from user's shell
// overwriting any existing values for given key
func (c *configuration) populateEnv(i int, envs *[]string) {
for _, newEnv := range c.Servers[i].EnvVars {
key := strings.Split(newEnv, "=")[0]
// filter out any matching keys in envs then adds new value
*envs = common.Filter(
*envs,
func(s string) bool {
return key != strings.Split(s, "=")[0]
})
*envs = append(*envs, newEnv)
}
}
func (c *configuration) setLogging(name string) (*os.File, error) {
// Set log level mask for default logger from config.
switch c.ControlLogMask {
case cLogDebug:
log.Debugf("Switching control log level to DEBUG")
log.SetLevel(log.Debug)
case cLogError:
log.Debugf("Switching control log level to ERROR")
log.SetLevel(log.Error)
}
// Set log file for default logger if specified in config.
if c.ControlLogFile != "" {
f, err := common.AppendFile(c.ControlLogFile)
if err != nil {
return nil, errors.WithMessage(
err, "create log file")
}
log.Debugf(
"%s logging to file %s",
os.Args[0], c.ControlLogFile)
log.SetOutput(f)
return f, nil
}
// if no logfile specified, output from multiple hosts
// may get aggregated, prefix entries with hostname
log.NewDefaultLogger(log.Debug, name+" ", os.Stderr)
return nil, nil
}
| {
return config, errors.WithMessage(err, "set path")
} | conditional_block |
main.js | // $(".seven-wonders").append('<div class="great-wall">the great wall of china</div>')
$(".seven-wonders").append('<div class="welcome"></div>')
$(".welcome").append('<p id="wel">welcome to our website! Lets take you on a tour </p>')
$(".welcome").append('<img id="image1" src="https://velvetescape.com/wp-content/uploads/2011/11/IMG_1953-1280x920.jpg"/>')
$(".welcome").append('<img id="image2" src="https://whc.unesco.org/uploads/thumbs/site_0252_0003-500-375-20080319163312.jpg"/>')
$(".welcome").append('<img id="image3" src="https://cdn.britannica.com/s:700x500/09/129609-050-6EE5ECEE/Moon-Great-Wall-of-China.jpg"/>')
$(".welcome").append('<img id="image4" src="https://cdn.britannica.com/s:700x500/36/162636-050-932C5D49/Colosseum-Rome-Italy.jpg"/>')
$("#image1").height(300)
$("#image1").width(300)
$("#image2").height(300)
$("#image2").width(300)
$("#image3").height(300)
$("#image3").width(300)
$("#image4").height(300)
$("#image4").width(300)
$(".welcome").append('<button id="start">lets start</button>')
$(".seven-wonders").append('<div class="Petra2"></div>')
$(".seven-wonders").append('<div class="Petra-info" style="display: none"></div>')
$('.Petra-info').append('<p id="petra1">Petra one of the world wonders</p>')
$(".Petra-info").append('<br>')
$(".Petra-info").append('<img id="image-petra1" src="https://velvetescape.com/wp-content/uploads/2011/11/IMG_1953-1280x920.jpg"/>')
$(".Petra-info").append('<img id="image-petra2" src="https://www.toledohotel.jo/content/images/thumbs/0000683_petra_550.jpeg"/>')
$(".Petra-info").append('<img id="image-petra3" src="https://s27363.pcdn.co/wp-content/uploads/2016/05/Treasury-Petra.jpg.optimal.jpg"/>')
$(".Petra-info").append('<img id="image-petra4" src="https://jordantrail.org/wp-content/uploads/2017/01/4-Little-Petra-to-Petra8.jpg"/>')
$("#image-petra1").height(300)
$("#image-petra1").width(300)
$("#image-petra2").height(300)
$("#image-petra2").width(300)
$("#image-petra3").height(300)
$("#image-petra3").width(300)
$("#image-petra4").height(300)
$("#image-petra4").width(300)
$(".Petra-info").append('<br>')
$(".Petra-info").append('<br>')
$(".Petra-info").append('<br>')
$(".Petra-info").append('<p id="petro">Declared a World Heritage Site in 1985, Petra was the capital of the Nabataean empire of King Aretas IV, and likely existed in its prime from 9 B.C. to A.D. 40. The members of this civilization proved to be early experts in manipulating water technology, constructing intricate tunnels and water chambers, which helped create an pseudo-oasis. A number of incredible structures carved into stone, a 4,000-seat amphitheater and the El-Deir monastery have also helped the site earn its fame.</p>')
$(".Petra-info").append('<br>')
$(".Petra-info").append('<br>')
$('.Petra-info').append('<iframe id="petravideo" ;width="642" height="361" src="https://www.youtube.com/embed/SZ5JjLdzQ1o" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
$(".Petra-info").append('<br>')
$(".Petra-info").append('<button id="next">next</button>')
$(".Petra-info").append('<br>')
$(".Petra-info").append('<button id="return">return</button>')
$(".seven-wonders").append('<div class="Taj-Mahal3" style="display: none"></div>')
$('.Taj-Mahal3').append('<p id="taj-mahal">Taj mahal India</p>')
$('.Taj-Mahal3').append('<p id="para1">Taj Mahal, also spelled Tadj Mahall, mausoleum complex in Agra, western Uttar Pradesh state, northern India. It is situated in the eastern part of the city on the southern (right) bank of the Yamuna (Jumna) River. Agra Fort (Red Fort), also on the right bank of the Yamuna, is about 1 mile (1.6 km) west of the Taj Mahal.</p>')
$('#para1').css('color','#bccad6')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<img id="taj2" src="https://whc.unesco.org/uploads/thumbs/site_0252_0003-500-375-20080319163312.jpg"/>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<p id=para2> Taj Mahal is distinguished as the finest example of Mughal architecture, a blend of Indian, Persian, and Islamic styles. One of the most beautiful structural compositions in the world, the Taj Mahal is also one of the world’s most iconic monuments, visited by millions of tourists each year. The complex was designated a UNESCO World Heritage site in 1983.</p>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<img id="taj3" src="https://cdn.britannica.com/s:700x500/08/172208-004-7CCDFF72/portrait-emperor-Mughal-Shah-Jahan.jpg"/>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<p id=para3> The Taj Mahal was built by the Mughal emperor Shah Jahān (reigned 1628–58) to immortalize his wife Mumtaz Mahal (“Chosen One of the Palace”). She died in childbirth in 1631, after having been the emperor’s inseparable companion since their marriage in 1612. The plans for the complex have been attributed to various architects of the period, though the chief architect was probably Ustad Aḥmad Lahawrī, an Indian of Persian descent. The five principal elements of the complex—main gateway, garden, mosque, jawāb (literally “answer”; a building mirroring the mosque), and mausoleum (including its four minarets)—were conceived and designed as a unified entity according to the tenets of Mughal building practice, which allowed no subsequent addition or alteration. Building commenced about 1632.</p>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<img id="taj4" src="https://whc.unesco.org/uploads/thumbs/site_0252_0008-750-0-20151104113424.jpg"/>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<p id=para4> Over the centuries the Taj Mahal has been subject to neglect and decay. A major restoration was carried out at the beginning of the 20th century under the direction of Lord Curzon, then the British viceroy of India. More recently, air pollution caused by emissions from foundries and other nearby factories and exhaust from motor vehicles has damaged the mausoleum, notably its marble facade. A number of measures have been taken to reduce the threat to the monument, among them the closing of some foundries and the installation of pollution-control equipment at others, the creation of a parkland buffer zone around the complex, and the banning of nearby vehicular traffic. A restoration and research program for the Taj Mahal was initiated in 1998. Progress in improving environmental conditions around the monument has been slow.</p>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<p id=para5> here are some more mazing pictures </p>')
$(".Taj-Mahal3").append('<br>')
$(".Taj-Mahal3").append('<img id="taj5" src="https://cdn.britannica.com/s:700x500/43/178143-050-1E8D6013/marble-portal-Taj-Mahal-India-Agra.jpg"/>')
$(".Taj-Mahal3").append('<br>')
$('#para2').css('color','#bccad6')
$('#para3').css('color','#bccad6')
$('#para4').css('color','#bccad6')
$('#para5').css('color','#bccad6')
$(".Taj-Mahal3").append('<button id="next2">next</button>')
$(".Taj-Mahal3").append('<button id="return1">return</button>')
$(".seven-wonders").append('<div class="the-great-wall" style="display: none"></div>')
$('.the-great-wall').append('<p id="great">The great wall of china</p>')
$(".the-great-wall").append('<br>')
$(".the-great-wall").append('<img id="picture-of-great-wall" src="https://cdn.britannica.com/s:700x500/09/129609-050-6EE5ECEE/Moon-Great-Wall-of-China.jpg"/>')
$(".the-great-wall").append('<br>')
$(".the-great-wall").append('<p id="great1">The Great Wall of China is an ancient series of walls and fortifications, totaling more than 13,000 miles in length, located in northern China. Perhaps the most recognizable symbol of China and its long and vivid history, the Great Wall was originally conceived by Emperor Qin Shi Huang in the third century B.C. as a means of preventing incursions from barbarian nomads. The best-known and best-preserved section of the Great Wall was built in the 14th through 17th centuries A.D., during the Ming dynasty. Though the Great Wall never effectively prevented invaders from entering China, it came to function as a powerful symbol of Chinese civilization enduring strength.</p>')
$('#great1').css('color','#bccad6')
$(".the-great-wall").append('<br>')
$(".the-great-wall").append('<iframe id="video-china" ;" width="642" height="361" src="https://www.youtube.com/embed/23oHqNEqRyo" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
$(".the-great-wall").append('<br>')
$(".the-great-wall").append('<br>')
$(".the-great-wall").append('<button id="next3">next</button>')
$(".the-great-wall").append('<button id="return2">return</button>')
$(".seven-wonders").append('<div class="colosseum" style="display: none"> </div>')
$(".colosseum").append('<br>')
$('.colosseum').append('<p id="col">Colosseum - Italy </p>')
$(".colosseum").append('<br>')
//rome 1
$(".colosseum").append('<img id="colosseum1" src="https://cdn.britannica.com/s:700x500/36/162636-050-932C5D49/Colosseum-Rome-Italy.jpg"/>')
$(".colosseum").append('<div class="rome1">Even after the decadent Roman emperor Nero took his own life in A.D. 68, his misrule and excesses fueled a series of civil wars. No fewer than four emperors took the throne in the tumultuous year after Nero’s death; the fourth, Vespasian, would end up ruling for 10 years (A.D. 69-79). The Flavian emperors, as Vespasian and his sons Titus (79-81) and Domitian (81-96) were known, attempted to tone down the excesses of the Roman court, restore Senate authority and promote public welfare.</div>')
$(".colosseum").append('<br>')
$(".colosseum").append('<br>')
//rome2
$(".colosseum").append('<img id="colosseum2" src="https://cdn.britannica.com/s:700x500/34/179234-050-8E54EB21/Interior-Colosseum-Rome.jpg"/>')
$(".colosseum").append('<div class="rome2">Even after the decadent Roman emperor Nero took his own life in A.D. 68, his misrule and excesses fueled a series of civil wars. No fewer than four emperors took the throne in the tumultuous year after Nero’s death; the fourth, Vespasian, would end up ruling for 10 years (A.D. 69-79). The Flavian emperors, as Vespasian and his sons Titus (79-81) and Domitian (81-96) were known, attempted to tone down the excesses of the Roman court, restore Senate authority and promote public welfare.</div>')
$(".colosseum").append('<br>')
$(".colosseum").append('<br>')
//rome 3
| $(".colosseum").append('<br>')
$(".colosseum").append('<br>')
//rome 4
$(".colosseum").append('<img id="colosseum4" src="https://cdn.civitatis.com/italia/roma/galeria/coliseo-roma-noche.jpg"/>')
$(".colosseum").append('<div class="rome4">In medieval times, the Colosseum was used as a church, then as a fortress by two prominent Roman families, the Frangipane and the Annibaldi. The Colosseum was damaged by lightning and earthquakes and, even more severely, by vandalism and pollution. All the marble seats and decorative materials disappeared, as the site was treated as little more than a quarry for more than 1,000 years. Preservation of the Colosseum began in earnest in the 19th century, with notable efforts led by Pius VIII, and a restoration project was undertaken in the 1990s. It has long been one of Rome’s major tourist attractions, receiving close to seven million visitors annually. Changing exhibitions relating to the culture of ancient Rome are regularly mounted.</div>')
$(".colosseum").append('<br>')
$(".colosseum").append('<br>')
//rome 5
$(".colosseum").append('<div class="rome4">here are some more pictures </div>')
$(".colosseum").append('<img id="colosseum5" src="https://cdn.civitatis.com/italia/roma/galeria/coliseo-interior.jpg"/>')
$(".colosseum").append('<br>')
$(".colosseum").append('<br>')
$(".colosseum").append('<img id="colosseum5" src="https://thumbs-prod.si-cdn.com/69Jy9fQc96rnoAtv6JSfD2jI_nU=/fit-in/1072x0/https://public-media.si-cdn.com/filer/Colosseum-Secrets-workers-area-10.jpg"/>')
$(".colosseum").append('<br>')
$(".colosseum").append('<br>')
$(".colosseum").append('<img id="colosseum5" src="https://thumbs-prod.si-cdn.com/XXxTEOlh1XZ367LYoWWRCTU9P6E=/fit-in/1072x0/https://public-media.si-cdn.com/filer/Colosseum-Secrets-Heinz-Jurgen-Beste-2.jpg"/>')
$(".colosseum").append('<br>')
$('.rome1').css('color','#bccad6')
$('.rome2').css('color','#bccad6')
$('.rome3').css('color','#bccad6')
$('.rome4').css('color','#bccad6')
$(".colosseum").append('<button id="next4">next</button>')
$(".colosseum").append('<button id="return3">return</button>')
$(".seven-wonders").append('<div class="extra" style="display: none"></div>')
$(".extra").append("where are you going <br> <br><input id='nameId' <input/> ")
var buttonE=$('<button class="name"> lets go </button>');
$(".extra").append(buttonE);
$(".name").click(function (){
var x=$("#nameId").val();
if (x.length===0){
alert ("You have to write your name!")
}
else
$('<p id="place"> </p>').append(x + " is going nowhere" ).appendTo(".extra")
$(".extra").append('<img id="corona" src="https://pbs.twimg.com/media/EYJf-gTXgAEugwl.jpg"/>')
$("#nameId").val('')
})
$(".extra").append('<br>')
$(".extra").append('<br>')
$(".extra").append('<br>')
$(".extra").append('<br>')
$(".extra").append('<br>')
$(".extra").append('<button id="final0">next</button>')
$(".seven-wonders").append('<div class="end" style="display: none"></div>')
$('.end').append('<p id="end-card">Thats it ! We hope you enjoyed it! Thankyou!</p>')
$(".end").append('<button id="final">back to home page</button>')
$(".colosseum").append('<br>')
$('#start').click(function(){
$(".welcome").hide()
$(".Petra-info").show()
})
$('#next').click(function(){
$(".Petra-info").hide()
$('.Taj-Mahal3').show()
})
$('#return').click(function(){
$(".Petra-info").hide()
$('.welcome').show()
})
$('#return1').click(function(){
$(".Taj-Mahal3").hide()
$('.Petra-info').show()
})
$('#return2').click(function(){
$(".the-great-wall").hide()
$('.Taj-Mahal3').show()
})
$('#final0').click(function(){
$(".extra").hide()
$('.end').show()
})
$('#final').click(function(){
$(".end").hide()
$('.welcome').show()
})
$('#return3').click(function(){
$(".colosseum").hide()
$('.the-great-wall').show()
})
$('#next2').click(function(){
$('.Taj-Mahal3').hide()
$('.the-great-wall').show()
})
$('#next3').click(function(){
$('.the-great-wall').hide()
$('.colosseum').show()
})
$('#next4').click(function(){
$('.colosseum').hide()
$('.extra').show()
}) | $(".colosseum").append('<img id="colosseum3" src="https://cdn.britannica.com/s:700x500/69/94469-050-4340CF77/Colosseum-Rome.jpg"/>')
$(".colosseum").append('<div class="rome3">At present the Colosseum is, along with the Vatican City, Romes greatest tourist attraction. Each year 6 million tourists visit it. On 7 July 2007 the Colosseum became one of the Seven Wonders of the Modern World.</div>') | random_line_split |
DebugModule.go | package sweetiebot
import (
"fmt"
"sort"
"strings"
"strconv"
"github.com/blackhole12/discordgo"
)
type DebugModule struct {
}
// Name of the module
func (w *DebugModule) Name() string {
return "Debug"
}
// Commands in the module
func (w *DebugModule) Commands() []Command {
return []Command{
&echoCommand{},
&echoEmbedCommand{},
&disableCommand{},
&enableCommand{},
&updateCommand{},
&dumpTablesCommand{},
&listGuildsCommand{},
&announceCommand{},
&removeAliasCommand{},
&getAuditCommand{},
}
}
// Description of the module
func (w *DebugModule) Description() string {
return "Contains various debugging commands. Some of these commands can only be run by the bot owner."
}
type echoCommand struct {
}
func (c *echoCommand) Name() string {
return "Echo"
}
func (c *echoCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
if channelregex.MatchString(arg) {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
info.SendMessage(arg[2:len(arg)-1], msg.Content[indices[1]:])
return "", false, nil
}
return msg.Content[indices[0]:], false, nil
}
func (c *echoCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot say the given sentence in `#channel`, or in the current channel if no channel is provided.",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *echoCommand) UsageShort() string {
return "Makes Sweetie Bot say something in the given channel."
}
type echoEmbedCommand struct {
}
func (c *echoEmbedCommand) Name() string {
return "EchoEmbed"
}
func (c *echoEmbedCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
channel := msg.ChannelID
i := 0
if channelregex.MatchString(arg) {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
channel = arg[2 : len(arg)-1]
i++
}
if i >= len(args) {
return "```A URL is mandatory or discord won't send the embed message for some stupid reason.```", false, nil
}
url := args[i]
i++
var color uint64 = 0xFFFFFFFF
if i < len(args) {
if colorregex.MatchString(args[i]) {
if len(args) < i+2 {
return "```You have to tell me to say something, silly!```", false, nil
}
color, _ = strconv.ParseUint(args[i][2:], 16, 64)
i++
}
}
fields := make([]*discordgo.MessageEmbedField, 0, len(args)-i)
for i < len(args) {
s := strings.SplitN(args[i], ":", 2)
if len(s) < 2 {
return "```Malformed key:value pair. If your key value pair has a space in it, remember to put it in parenthesis!```", false, nil
}
fields = append(fields, &discordgo.MessageEmbedField{Name: s[0], Value: s[1], Inline: true})
i++
}
embed := &discordgo.MessageEmbed{ | URL: url,
Name: msg.Author.Username + "#" + msg.Author.Discriminator,
IconURL: fmt.Sprintf("https://cdn.discordapp.com/avatars/%s/%s.jpg", msg.Author.ID, msg.Author.Avatar),
},
Color: int(color),
Fields: fields,
}
info.SendEmbed(channel, embed)
return "", false, nil
}
func (c *echoEmbedCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot assemble a rich text embed and echo it in the given channel",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "URL", Desc: "URL for the author to link to.", Optional: false},
{Name: "0xC0L0R", Desc: "Color of the embed box.", Optional: true},
{Name: "key:value", Desc: "A key:value pair of fields to display in the embed. Remember to use quotes around the *entire* key:value pair if either the key or the value have spaces.", Optional: true, Variadic: true},
},
}
}
func (c *echoEmbedCommand) UsageShort() string {
return "Makes Sweetie Bot echo a rich text embed in a given channel."
}
func SetCommandEnable(args []string, enable bool, success string, info *GuildInfo, channelID string) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```No module or command specified.Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
name := strings.ToLower(args[0])
for _, v := range info.modules {
if strings.ToLower(v.Name()) == name {
cmds := v.Commands()
for _, v := range cmds {
str := strings.ToLower(v.Name())
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
}
if enable {
delete(info.config.Modules.Disabled, name)
} else {
CheckMapNilBool(&info.config.Modules.Disabled)
info.config.Modules.Disabled[name] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
for _, v := range info.commands {
str := strings.ToLower(v.Name())
if str == name {
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
return "```The " + args[0] + " module/command does not exist. Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
type disableCommand struct {
}
func (c *disableCommand) Name() string {
return "Disable"
}
func (c *disableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, false, " was disabled.", info, msg.ChannelID)
}
func (c *disableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Disables the given module or command, if possible. If the module/command is already disabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to disable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *disableCommand) UsageShort() string { return "Disables the given module/command, if possible." }
type enableCommand struct {
}
func (c *enableCommand) Name() string {
return "Enable"
}
func (c *enableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, true, " was enabled.", info, msg.ChannelID)
}
func (c *enableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Enables the given module or command, if possible. If the module/command is already enabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to enable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *enableCommand) UsageShort() string { return "Enables the given module/command." }
func (c *enableCommand) Roles() []string { return []string{"Princesses", "Royal Guard"} }
func (c *enableCommand) Channels() []string { return []string{} }
type updateCommand struct {
}
func (c *updateCommand) Name() string {
return "Update"
}
func (c *updateCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
/*sb.log.Log("Update command called, current PID: ", os.Getpid())
err := exec.Command("./update.sh", strconv.Itoa(os.Getpid())).Start()
if err != nil {
sb.log.Log("Command.Start() error: ", err.Error())
return "```Could not start update script!```"
}*/
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "```Shutting down for update...```")
}
}
sb.quit.set(true) // Instead of trying to call a batch script, we run the bot inside an infinite loop batch script and just shut it off when we want to update
return "```Shutting down for update...```", false, nil
}
func (c *updateCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Tells sweetiebot to shut down, calls an update script, rebuilds the code, and then restarts."}
}
func (c *updateCommand) UsageShort() string { return "[RESTRICTED] Updates sweetiebot." }
func (c *updateCommand) Roles() []string { return []string{"Princesses"} }
func (c *updateCommand) Channels() []string { return []string{} }
type dumpTablesCommand struct {
}
func (c *dumpTablesCommand) Name() string {
return "DumpTables"
}
func (c *dumpTablesCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return "```\n" + sb.db.GetTableCounts() + "```", false, nil
}
func (c *dumpTablesCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Dumps table row counts."}
}
func (c *dumpTablesCommand) UsageShort() string { return "Dumps table row counts." }
type guildSlice []*discordgo.Guild
func (s guildSlice) Len() int {
return len(s)
}
func (s guildSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s guildSlice) Less(i, j int) bool {
if s[i].MemberCount > len(s[i].Members) {
i = s[i].MemberCount
} else {
i = len(s[i].Members)
}
if s[j].MemberCount > len(s[j].Members) {
j = s[j].MemberCount
} else {
j = len(s[j].Members)
}
return i > j
}
type listGuildsCommand struct {
}
func (c *listGuildsCommand) Name() string {
return "ListGuilds"
}
func (c *listGuildsCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
sb.dg.State.RLock()
guilds := append([]*discordgo.Guild{}, sb.dg.State.Guilds...)
sb.dg.State.RUnlock()
sort.Sort(guildSlice(guilds))
s := make([]string, 0, len(guilds))
private := 0
for _, v := range guilds {
if !isOwner {
sb.guildsLock.RLock()
g, ok := sb.guilds[SBatoi(v.ID)]
sb.guildsLock.RUnlock()
if ok && g.config.Basic.Importable {
s = append(s, PartialSanitize(v.Name))
} else {
private++
}
} else {
username := "<@" + v.OwnerID + ">"
if sb.db.status.get() {
m, _, _, _ := sb.db.GetUser(SBatoi(v.OwnerID))
if m != nil {
username = m.Username + "#" + m.Discriminator
}
}
count := v.MemberCount
if count < len(v.Members) {
count = len(v.Members)
}
s = append(s, PartialSanitize(fmt.Sprintf("%v (%v) - %v", v.Name, count, username)))
}
}
return fmt.Sprintf("```Sweetie has joined these servers:\n%s\n\n+ %v private servers (Basic.Importable is false)```", strings.Join(s, "\n"), private), len(s) > 8, nil
}
func (c *listGuildsCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Lists the servers that sweetiebot has joined."}
}
func (c *listGuildsCommand) UsageShort() string { return "Lists servers." }
type announceCommand struct {
}
func (c *announceCommand) Name() string {
return "Announce"
}
func (c *announceCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
arg := msg.Content[indices[0]:]
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "<@&"+SBitoa(v.config.Basic.AlertRole)+"> "+arg)
}
}
return "", false, nil
}
func (c *announceCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that announces a message to all the log channels of all servers.",
Params: []CommandUsageParam{
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *announceCommand) UsageShort() string { return "[RESTRICTED] Announcement command." }
type removeAliasCommand struct {
}
func (c *removeAliasCommand) Name() string {
return "RemoveAlias"
}
func (c *removeAliasCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
if len(args) < 1 {
return "```You must PING the user you want to remove an alias from.```", false, nil
}
if len(args) < 2 {
return "```You must provide an alias to remove.```", false, nil
}
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
sb.db.RemoveAlias(PingAtoi(args[0]), msg.Content[indices[1]:])
return "```Attempted to remove the alias. Use " + info.config.Basic.CommandPrefix + "aka to check if it worked.```", false, nil
}
func (c *removeAliasCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that removes the alias for a given user. The user must be pinged, and the alias must match precisely.",
Params: []CommandUsageParam{
{Name: "user", Desc: "A ping to a specific user in the format @User.", Optional: false},
{Name: "alias", Desc: "The *exact* name of the alias to remove.", Optional: false},
},
}
}
func (c *removeAliasCommand) UsageShort() string { return "[RESTRICTED] Removes an alias." }
type getAuditCommand struct {
}
func (c *getAuditCommand) Name() string {
return "GetAudit"
}
func (c *getAuditCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
var low uint64
var high uint64 = 10
var user *uint64
var search string
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
for i := 0; i < len(args); i++ {
if len(args[i]) > 0 {
switch args[i][0] {
case '<', '@':
if args[i][0] == '@' || (len(args[i]) > 1 && args[i][1] == '@') {
var IDs []uint64
if args[i][0] == '@' {
IDs = FindUsername(args[i][1:], info)
} else {
IDs = []uint64{SBatoi(StripPing(args[i]))}
}
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + args[i] + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info, true), "\n") + "```", len(IDs) > 5, nil
}
user = &IDs[0]
break
}
fallthrough
case '$', '!':
if args[i][0] != '!' {
search = "%"
}
if args[i][0] == '$' {
search += msg.Content[indices[i]+1:] + "%"
} else {
search += msg.Content[indices[i]:] + "%"
}
i = len(args)
default:
s := strings.SplitN(args[i], "-", 2)
if len(s) == 1 {
high = SBatoi(s[0])
} else if len(s) > 1 {
low = SBatoi(s[0]) - 1
high = SBatoi(s[1])
}
}
}
}
r := sb.db.GetAuditRows(low, high, user, search, SBatoi(info.ID))
ret := []string{"```Matching Audit Log entries:```"}
for _, v := range r {
ret = append(ret, fmt.Sprintf("[%s] %s: %s", ApplyTimezone(v.Timestamp, info, msg.Author).Format("1/2 3:04:05PM"), v.Author, v.Message))
}
return strings.Join(ret, "\n"), len(ret) > 12, nil
}
func (c *getAuditCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Allows admins to inspect the audit log.",
Params: []CommandUsageParam{
{Name: "range", Desc: "If this is a single number, the number of results to return. If it's a range in the form 999-9999, returns the given range of audit log entries, up to a maximum of 50 in one call. Defaults to displaying 1-10.", Optional: true},
{Name: "user", Desc: "Must be in the form of @user, either as an actual ping or just part of the users name. If included, filters results to just that user. If there are spaces in the username, you must use quotes.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string starting with either `!` or `$`. `!` will search for an exact command (regardless of what your command prefix has been set to), whereas `$` will simply search for the string anywhere in the audit log. This will eat up all remaining arguments, so put the user and the range BEFORE specifying the search string, and don't use quotes!", Optional: true},
},
}
}
func (c *getAuditCommand) UsageShort() string { return "Inspects the audit log." } | Type: "rich",
Author: &discordgo.MessageEmbedAuthor{ | random_line_split |
DebugModule.go | package sweetiebot
import (
"fmt"
"sort"
"strings"
"strconv"
"github.com/blackhole12/discordgo"
)
type DebugModule struct {
}
// Name of the module
func (w *DebugModule) Name() string {
return "Debug"
}
// Commands in the module
func (w *DebugModule) Commands() []Command {
return []Command{
&echoCommand{},
&echoEmbedCommand{},
&disableCommand{},
&enableCommand{},
&updateCommand{},
&dumpTablesCommand{},
&listGuildsCommand{},
&announceCommand{},
&removeAliasCommand{},
&getAuditCommand{},
}
}
// Description of the module
func (w *DebugModule) Description() string {
return "Contains various debugging commands. Some of these commands can only be run by the bot owner."
}
type echoCommand struct {
}
func (c *echoCommand) Name() string {
return "Echo"
}
func (c *echoCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
if channelregex.MatchString(arg) {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
info.SendMessage(arg[2:len(arg)-1], msg.Content[indices[1]:])
return "", false, nil
}
return msg.Content[indices[0]:], false, nil
}
func (c *echoCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot say the given sentence in `#channel`, or in the current channel if no channel is provided.",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *echoCommand) UsageShort() string {
return "Makes Sweetie Bot say something in the given channel."
}
type echoEmbedCommand struct {
}
func (c *echoEmbedCommand) Name() string {
return "EchoEmbed"
}
func (c *echoEmbedCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
channel := msg.ChannelID
i := 0
if channelregex.MatchString(arg) {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
channel = arg[2 : len(arg)-1]
i++
}
if i >= len(args) {
return "```A URL is mandatory or discord won't send the embed message for some stupid reason.```", false, nil
}
url := args[i]
i++
var color uint64 = 0xFFFFFFFF
if i < len(args) {
if colorregex.MatchString(args[i]) {
if len(args) < i+2 {
return "```You have to tell me to say something, silly!```", false, nil
}
color, _ = strconv.ParseUint(args[i][2:], 16, 64)
i++
}
}
fields := make([]*discordgo.MessageEmbedField, 0, len(args)-i)
for i < len(args) {
s := strings.SplitN(args[i], ":", 2)
if len(s) < 2 {
return "```Malformed key:value pair. If your key value pair has a space in it, remember to put it in parenthesis!```", false, nil
}
fields = append(fields, &discordgo.MessageEmbedField{Name: s[0], Value: s[1], Inline: true})
i++
}
embed := &discordgo.MessageEmbed{
Type: "rich",
Author: &discordgo.MessageEmbedAuthor{
URL: url,
Name: msg.Author.Username + "#" + msg.Author.Discriminator,
IconURL: fmt.Sprintf("https://cdn.discordapp.com/avatars/%s/%s.jpg", msg.Author.ID, msg.Author.Avatar),
},
Color: int(color),
Fields: fields,
}
info.SendEmbed(channel, embed)
return "", false, nil
}
func (c *echoEmbedCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot assemble a rich text embed and echo it in the given channel",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "URL", Desc: "URL for the author to link to.", Optional: false},
{Name: "0xC0L0R", Desc: "Color of the embed box.", Optional: true},
{Name: "key:value", Desc: "A key:value pair of fields to display in the embed. Remember to use quotes around the *entire* key:value pair if either the key or the value have spaces.", Optional: true, Variadic: true},
},
}
}
func (c *echoEmbedCommand) UsageShort() string {
return "Makes Sweetie Bot echo a rich text embed in a given channel."
}
func SetCommandEnable(args []string, enable bool, success string, info *GuildInfo, channelID string) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```No module or command specified.Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
name := strings.ToLower(args[0])
for _, v := range info.modules {
if strings.ToLower(v.Name()) == name {
cmds := v.Commands()
for _, v := range cmds {
str := strings.ToLower(v.Name())
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
}
if enable {
delete(info.config.Modules.Disabled, name)
} else {
CheckMapNilBool(&info.config.Modules.Disabled)
info.config.Modules.Disabled[name] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
for _, v := range info.commands {
str := strings.ToLower(v.Name())
if str == name {
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
return "```The " + args[0] + " module/command does not exist. Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
type disableCommand struct {
}
func (c *disableCommand) Name() string {
return "Disable"
}
func (c *disableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, false, " was disabled.", info, msg.ChannelID)
}
func (c *disableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Disables the given module or command, if possible. If the module/command is already disabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to disable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *disableCommand) UsageShort() string { return "Disables the given module/command, if possible." }
type enableCommand struct {
}
func (c *enableCommand) Name() string {
return "Enable"
}
func (c *enableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, true, " was enabled.", info, msg.ChannelID)
}
func (c *enableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Enables the given module or command, if possible. If the module/command is already enabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to enable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *enableCommand) UsageShort() string { return "Enables the given module/command." }
func (c *enableCommand) Roles() []string { return []string{"Princesses", "Royal Guard"} }
func (c *enableCommand) Channels() []string { return []string{} }
type updateCommand struct {
}
func (c *updateCommand) Name() string {
return "Update"
}
func (c *updateCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
/*sb.log.Log("Update command called, current PID: ", os.Getpid())
err := exec.Command("./update.sh", strconv.Itoa(os.Getpid())).Start()
if err != nil {
sb.log.Log("Command.Start() error: ", err.Error())
return "```Could not start update script!```"
}*/
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "```Shutting down for update...```")
}
}
sb.quit.set(true) // Instead of trying to call a batch script, we run the bot inside an infinite loop batch script and just shut it off when we want to update
return "```Shutting down for update...```", false, nil
}
func (c *updateCommand) Usage(info *GuildInfo) *CommandUsage |
func (c *updateCommand) UsageShort() string { return "[RESTRICTED] Updates sweetiebot." }
func (c *updateCommand) Roles() []string { return []string{"Princesses"} }
func (c *updateCommand) Channels() []string { return []string{} }
type dumpTablesCommand struct {
}
func (c *dumpTablesCommand) Name() string {
return "DumpTables"
}
func (c *dumpTablesCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return "```\n" + sb.db.GetTableCounts() + "```", false, nil
}
func (c *dumpTablesCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Dumps table row counts."}
}
func (c *dumpTablesCommand) UsageShort() string { return "Dumps table row counts." }
type guildSlice []*discordgo.Guild
func (s guildSlice) Len() int {
return len(s)
}
func (s guildSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s guildSlice) Less(i, j int) bool {
if s[i].MemberCount > len(s[i].Members) {
i = s[i].MemberCount
} else {
i = len(s[i].Members)
}
if s[j].MemberCount > len(s[j].Members) {
j = s[j].MemberCount
} else {
j = len(s[j].Members)
}
return i > j
}
type listGuildsCommand struct {
}
func (c *listGuildsCommand) Name() string {
return "ListGuilds"
}
func (c *listGuildsCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
sb.dg.State.RLock()
guilds := append([]*discordgo.Guild{}, sb.dg.State.Guilds...)
sb.dg.State.RUnlock()
sort.Sort(guildSlice(guilds))
s := make([]string, 0, len(guilds))
private := 0
for _, v := range guilds {
if !isOwner {
sb.guildsLock.RLock()
g, ok := sb.guilds[SBatoi(v.ID)]
sb.guildsLock.RUnlock()
if ok && g.config.Basic.Importable {
s = append(s, PartialSanitize(v.Name))
} else {
private++
}
} else {
username := "<@" + v.OwnerID + ">"
if sb.db.status.get() {
m, _, _, _ := sb.db.GetUser(SBatoi(v.OwnerID))
if m != nil {
username = m.Username + "#" + m.Discriminator
}
}
count := v.MemberCount
if count < len(v.Members) {
count = len(v.Members)
}
s = append(s, PartialSanitize(fmt.Sprintf("%v (%v) - %v", v.Name, count, username)))
}
}
return fmt.Sprintf("```Sweetie has joined these servers:\n%s\n\n+ %v private servers (Basic.Importable is false)```", strings.Join(s, "\n"), private), len(s) > 8, nil
}
func (c *listGuildsCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Lists the servers that sweetiebot has joined."}
}
func (c *listGuildsCommand) UsageShort() string { return "Lists servers." }
type announceCommand struct {
}
func (c *announceCommand) Name() string {
return "Announce"
}
func (c *announceCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
arg := msg.Content[indices[0]:]
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "<@&"+SBitoa(v.config.Basic.AlertRole)+"> "+arg)
}
}
return "", false, nil
}
func (c *announceCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that announces a message to all the log channels of all servers.",
Params: []CommandUsageParam{
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *announceCommand) UsageShort() string { return "[RESTRICTED] Announcement command." }
type removeAliasCommand struct {
}
func (c *removeAliasCommand) Name() string {
return "RemoveAlias"
}
func (c *removeAliasCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
if len(args) < 1 {
return "```You must PING the user you want to remove an alias from.```", false, nil
}
if len(args) < 2 {
return "```You must provide an alias to remove.```", false, nil
}
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
sb.db.RemoveAlias(PingAtoi(args[0]), msg.Content[indices[1]:])
return "```Attempted to remove the alias. Use " + info.config.Basic.CommandPrefix + "aka to check if it worked.```", false, nil
}
func (c *removeAliasCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that removes the alias for a given user. The user must be pinged, and the alias must match precisely.",
Params: []CommandUsageParam{
{Name: "user", Desc: "A ping to a specific user in the format @User.", Optional: false},
{Name: "alias", Desc: "The *exact* name of the alias to remove.", Optional: false},
},
}
}
func (c *removeAliasCommand) UsageShort() string { return "[RESTRICTED] Removes an alias." }
type getAuditCommand struct {
}
func (c *getAuditCommand) Name() string {
return "GetAudit"
}
func (c *getAuditCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
var low uint64
var high uint64 = 10
var user *uint64
var search string
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
for i := 0; i < len(args); i++ {
if len(args[i]) > 0 {
switch args[i][0] {
case '<', '@':
if args[i][0] == '@' || (len(args[i]) > 1 && args[i][1] == '@') {
var IDs []uint64
if args[i][0] == '@' {
IDs = FindUsername(args[i][1:], info)
} else {
IDs = []uint64{SBatoi(StripPing(args[i]))}
}
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + args[i] + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info, true), "\n") + "```", len(IDs) > 5, nil
}
user = &IDs[0]
break
}
fallthrough
case '$', '!':
if args[i][0] != '!' {
search = "%"
}
if args[i][0] == '$' {
search += msg.Content[indices[i]+1:] + "%"
} else {
search += msg.Content[indices[i]:] + "%"
}
i = len(args)
default:
s := strings.SplitN(args[i], "-", 2)
if len(s) == 1 {
high = SBatoi(s[0])
} else if len(s) > 1 {
low = SBatoi(s[0]) - 1
high = SBatoi(s[1])
}
}
}
}
r := sb.db.GetAuditRows(low, high, user, search, SBatoi(info.ID))
ret := []string{"```Matching Audit Log entries:```"}
for _, v := range r {
ret = append(ret, fmt.Sprintf("[%s] %s: %s", ApplyTimezone(v.Timestamp, info, msg.Author).Format("1/2 3:04:05PM"), v.Author, v.Message))
}
return strings.Join(ret, "\n"), len(ret) > 12, nil
}
func (c *getAuditCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Allows admins to inspect the audit log.",
Params: []CommandUsageParam{
{Name: "range", Desc: "If this is a single number, the number of results to return. If it's a range in the form 999-9999, returns the given range of audit log entries, up to a maximum of 50 in one call. Defaults to displaying 1-10.", Optional: true},
{Name: "user", Desc: "Must be in the form of @user, either as an actual ping or just part of the users name. If included, filters results to just that user. If there are spaces in the username, you must use quotes.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string starting with either `!` or `$`. `!` will search for an exact command (regardless of what your command prefix has been set to), whereas `$` will simply search for the string anywhere in the audit log. This will eat up all remaining arguments, so put the user and the range BEFORE specifying the search string, and don't use quotes!", Optional: true},
},
}
}
func (c *getAuditCommand) UsageShort() string { return "Inspects the audit log." }
| {
return &CommandUsage{Desc: "Tells sweetiebot to shut down, calls an update script, rebuilds the code, and then restarts."}
} | identifier_body |
DebugModule.go | package sweetiebot
import (
"fmt"
"sort"
"strings"
"strconv"
"github.com/blackhole12/discordgo"
)
type DebugModule struct {
}
// Name of the module
func (w *DebugModule) Name() string {
return "Debug"
}
// Commands in the module
func (w *DebugModule) Commands() []Command {
return []Command{
&echoCommand{},
&echoEmbedCommand{},
&disableCommand{},
&enableCommand{},
&updateCommand{},
&dumpTablesCommand{},
&listGuildsCommand{},
&announceCommand{},
&removeAliasCommand{},
&getAuditCommand{},
}
}
// Description of the module
func (w *DebugModule) Description() string {
return "Contains various debugging commands. Some of these commands can only be run by the bot owner."
}
type echoCommand struct {
}
func (c *echoCommand) Name() string {
return "Echo"
}
func (c *echoCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
if channelregex.MatchString(arg) |
return msg.Content[indices[0]:], false, nil
}
func (c *echoCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot say the given sentence in `#channel`, or in the current channel if no channel is provided.",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *echoCommand) UsageShort() string {
return "Makes Sweetie Bot say something in the given channel."
}
type echoEmbedCommand struct {
}
func (c *echoEmbedCommand) Name() string {
return "EchoEmbed"
}
func (c *echoEmbedCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
channel := msg.ChannelID
i := 0
if channelregex.MatchString(arg) {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
channel = arg[2 : len(arg)-1]
i++
}
if i >= len(args) {
return "```A URL is mandatory or discord won't send the embed message for some stupid reason.```", false, nil
}
url := args[i]
i++
var color uint64 = 0xFFFFFFFF
if i < len(args) {
if colorregex.MatchString(args[i]) {
if len(args) < i+2 {
return "```You have to tell me to say something, silly!```", false, nil
}
color, _ = strconv.ParseUint(args[i][2:], 16, 64)
i++
}
}
fields := make([]*discordgo.MessageEmbedField, 0, len(args)-i)
for i < len(args) {
s := strings.SplitN(args[i], ":", 2)
if len(s) < 2 {
return "```Malformed key:value pair. If your key value pair has a space in it, remember to put it in parenthesis!```", false, nil
}
fields = append(fields, &discordgo.MessageEmbedField{Name: s[0], Value: s[1], Inline: true})
i++
}
embed := &discordgo.MessageEmbed{
Type: "rich",
Author: &discordgo.MessageEmbedAuthor{
URL: url,
Name: msg.Author.Username + "#" + msg.Author.Discriminator,
IconURL: fmt.Sprintf("https://cdn.discordapp.com/avatars/%s/%s.jpg", msg.Author.ID, msg.Author.Avatar),
},
Color: int(color),
Fields: fields,
}
info.SendEmbed(channel, embed)
return "", false, nil
}
func (c *echoEmbedCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot assemble a rich text embed and echo it in the given channel",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "URL", Desc: "URL for the author to link to.", Optional: false},
{Name: "0xC0L0R", Desc: "Color of the embed box.", Optional: true},
{Name: "key:value", Desc: "A key:value pair of fields to display in the embed. Remember to use quotes around the *entire* key:value pair if either the key or the value have spaces.", Optional: true, Variadic: true},
},
}
}
func (c *echoEmbedCommand) UsageShort() string {
return "Makes Sweetie Bot echo a rich text embed in a given channel."
}
func SetCommandEnable(args []string, enable bool, success string, info *GuildInfo, channelID string) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```No module or command specified.Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
name := strings.ToLower(args[0])
for _, v := range info.modules {
if strings.ToLower(v.Name()) == name {
cmds := v.Commands()
for _, v := range cmds {
str := strings.ToLower(v.Name())
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
}
if enable {
delete(info.config.Modules.Disabled, name)
} else {
CheckMapNilBool(&info.config.Modules.Disabled)
info.config.Modules.Disabled[name] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
for _, v := range info.commands {
str := strings.ToLower(v.Name())
if str == name {
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
return "```The " + args[0] + " module/command does not exist. Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
type disableCommand struct {
}
func (c *disableCommand) Name() string {
return "Disable"
}
func (c *disableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, false, " was disabled.", info, msg.ChannelID)
}
func (c *disableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Disables the given module or command, if possible. If the module/command is already disabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to disable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *disableCommand) UsageShort() string { return "Disables the given module/command, if possible." }
type enableCommand struct {
}
func (c *enableCommand) Name() string {
return "Enable"
}
func (c *enableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, true, " was enabled.", info, msg.ChannelID)
}
func (c *enableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Enables the given module or command, if possible. If the module/command is already enabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to enable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *enableCommand) UsageShort() string { return "Enables the given module/command." }
func (c *enableCommand) Roles() []string { return []string{"Princesses", "Royal Guard"} }
func (c *enableCommand) Channels() []string { return []string{} }
type updateCommand struct {
}
func (c *updateCommand) Name() string {
return "Update"
}
func (c *updateCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
/*sb.log.Log("Update command called, current PID: ", os.Getpid())
err := exec.Command("./update.sh", strconv.Itoa(os.Getpid())).Start()
if err != nil {
sb.log.Log("Command.Start() error: ", err.Error())
return "```Could not start update script!```"
}*/
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "```Shutting down for update...```")
}
}
sb.quit.set(true) // Instead of trying to call a batch script, we run the bot inside an infinite loop batch script and just shut it off when we want to update
return "```Shutting down for update...```", false, nil
}
func (c *updateCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Tells sweetiebot to shut down, calls an update script, rebuilds the code, and then restarts."}
}
func (c *updateCommand) UsageShort() string { return "[RESTRICTED] Updates sweetiebot." }
func (c *updateCommand) Roles() []string { return []string{"Princesses"} }
func (c *updateCommand) Channels() []string { return []string{} }
type dumpTablesCommand struct {
}
func (c *dumpTablesCommand) Name() string {
return "DumpTables"
}
func (c *dumpTablesCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return "```\n" + sb.db.GetTableCounts() + "```", false, nil
}
func (c *dumpTablesCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Dumps table row counts."}
}
func (c *dumpTablesCommand) UsageShort() string { return "Dumps table row counts." }
type guildSlice []*discordgo.Guild
func (s guildSlice) Len() int {
return len(s)
}
func (s guildSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s guildSlice) Less(i, j int) bool {
if s[i].MemberCount > len(s[i].Members) {
i = s[i].MemberCount
} else {
i = len(s[i].Members)
}
if s[j].MemberCount > len(s[j].Members) {
j = s[j].MemberCount
} else {
j = len(s[j].Members)
}
return i > j
}
type listGuildsCommand struct {
}
func (c *listGuildsCommand) Name() string {
return "ListGuilds"
}
func (c *listGuildsCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
sb.dg.State.RLock()
guilds := append([]*discordgo.Guild{}, sb.dg.State.Guilds...)
sb.dg.State.RUnlock()
sort.Sort(guildSlice(guilds))
s := make([]string, 0, len(guilds))
private := 0
for _, v := range guilds {
if !isOwner {
sb.guildsLock.RLock()
g, ok := sb.guilds[SBatoi(v.ID)]
sb.guildsLock.RUnlock()
if ok && g.config.Basic.Importable {
s = append(s, PartialSanitize(v.Name))
} else {
private++
}
} else {
username := "<@" + v.OwnerID + ">"
if sb.db.status.get() {
m, _, _, _ := sb.db.GetUser(SBatoi(v.OwnerID))
if m != nil {
username = m.Username + "#" + m.Discriminator
}
}
count := v.MemberCount
if count < len(v.Members) {
count = len(v.Members)
}
s = append(s, PartialSanitize(fmt.Sprintf("%v (%v) - %v", v.Name, count, username)))
}
}
return fmt.Sprintf("```Sweetie has joined these servers:\n%s\n\n+ %v private servers (Basic.Importable is false)```", strings.Join(s, "\n"), private), len(s) > 8, nil
}
func (c *listGuildsCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Lists the servers that sweetiebot has joined."}
}
func (c *listGuildsCommand) UsageShort() string { return "Lists servers." }
type announceCommand struct {
}
func (c *announceCommand) Name() string {
return "Announce"
}
func (c *announceCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
arg := msg.Content[indices[0]:]
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "<@&"+SBitoa(v.config.Basic.AlertRole)+"> "+arg)
}
}
return "", false, nil
}
func (c *announceCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that announces a message to all the log channels of all servers.",
Params: []CommandUsageParam{
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *announceCommand) UsageShort() string { return "[RESTRICTED] Announcement command." }
type removeAliasCommand struct {
}
func (c *removeAliasCommand) Name() string {
return "RemoveAlias"
}
func (c *removeAliasCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
if len(args) < 1 {
return "```You must PING the user you want to remove an alias from.```", false, nil
}
if len(args) < 2 {
return "```You must provide an alias to remove.```", false, nil
}
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
sb.db.RemoveAlias(PingAtoi(args[0]), msg.Content[indices[1]:])
return "```Attempted to remove the alias. Use " + info.config.Basic.CommandPrefix + "aka to check if it worked.```", false, nil
}
func (c *removeAliasCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that removes the alias for a given user. The user must be pinged, and the alias must match precisely.",
Params: []CommandUsageParam{
{Name: "user", Desc: "A ping to a specific user in the format @User.", Optional: false},
{Name: "alias", Desc: "The *exact* name of the alias to remove.", Optional: false},
},
}
}
func (c *removeAliasCommand) UsageShort() string { return "[RESTRICTED] Removes an alias." }
type getAuditCommand struct {
}
func (c *getAuditCommand) Name() string {
return "GetAudit"
}
func (c *getAuditCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
var low uint64
var high uint64 = 10
var user *uint64
var search string
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
for i := 0; i < len(args); i++ {
if len(args[i]) > 0 {
switch args[i][0] {
case '<', '@':
if args[i][0] == '@' || (len(args[i]) > 1 && args[i][1] == '@') {
var IDs []uint64
if args[i][0] == '@' {
IDs = FindUsername(args[i][1:], info)
} else {
IDs = []uint64{SBatoi(StripPing(args[i]))}
}
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + args[i] + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info, true), "\n") + "```", len(IDs) > 5, nil
}
user = &IDs[0]
break
}
fallthrough
case '$', '!':
if args[i][0] != '!' {
search = "%"
}
if args[i][0] == '$' {
search += msg.Content[indices[i]+1:] + "%"
} else {
search += msg.Content[indices[i]:] + "%"
}
i = len(args)
default:
s := strings.SplitN(args[i], "-", 2)
if len(s) == 1 {
high = SBatoi(s[0])
} else if len(s) > 1 {
low = SBatoi(s[0]) - 1
high = SBatoi(s[1])
}
}
}
}
r := sb.db.GetAuditRows(low, high, user, search, SBatoi(info.ID))
ret := []string{"```Matching Audit Log entries:```"}
for _, v := range r {
ret = append(ret, fmt.Sprintf("[%s] %s: %s", ApplyTimezone(v.Timestamp, info, msg.Author).Format("1/2 3:04:05PM"), v.Author, v.Message))
}
return strings.Join(ret, "\n"), len(ret) > 12, nil
}
func (c *getAuditCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Allows admins to inspect the audit log.",
Params: []CommandUsageParam{
{Name: "range", Desc: "If this is a single number, the number of results to return. If it's a range in the form 999-9999, returns the given range of audit log entries, up to a maximum of 50 in one call. Defaults to displaying 1-10.", Optional: true},
{Name: "user", Desc: "Must be in the form of @user, either as an actual ping or just part of the users name. If included, filters results to just that user. If there are spaces in the username, you must use quotes.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string starting with either `!` or `$`. `!` will search for an exact command (regardless of what your command prefix has been set to), whereas `$` will simply search for the string anywhere in the audit log. This will eat up all remaining arguments, so put the user and the range BEFORE specifying the search string, and don't use quotes!", Optional: true},
},
}
}
func (c *getAuditCommand) UsageShort() string { return "Inspects the audit log." }
| {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
info.SendMessage(arg[2:len(arg)-1], msg.Content[indices[1]:])
return "", false, nil
} | conditional_block |
DebugModule.go | package sweetiebot
import (
"fmt"
"sort"
"strings"
"strconv"
"github.com/blackhole12/discordgo"
)
type DebugModule struct {
}
// Name of the module
func (w *DebugModule) Name() string {
return "Debug"
}
// Commands in the module
func (w *DebugModule) Commands() []Command {
return []Command{
&echoCommand{},
&echoEmbedCommand{},
&disableCommand{},
&enableCommand{},
&updateCommand{},
&dumpTablesCommand{},
&listGuildsCommand{},
&announceCommand{},
&removeAliasCommand{},
&getAuditCommand{},
}
}
// Description of the module
func (w *DebugModule) Description() string {
return "Contains various debugging commands. Some of these commands can only be run by the bot owner."
}
type echoCommand struct {
}
func (c *echoCommand) Name() string {
return "Echo"
}
func (c *echoCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
if channelregex.MatchString(arg) {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
info.SendMessage(arg[2:len(arg)-1], msg.Content[indices[1]:])
return "", false, nil
}
return msg.Content[indices[0]:], false, nil
}
func (c *echoCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot say the given sentence in `#channel`, or in the current channel if no channel is provided.",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *echoCommand) UsageShort() string {
return "Makes Sweetie Bot say something in the given channel."
}
type echoEmbedCommand struct {
}
func (c *echoEmbedCommand) Name() string {
return "EchoEmbed"
}
func (c *echoEmbedCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```You have to tell me to say something, silly!```", false, nil
}
arg := args[0]
channel := msg.ChannelID
i := 0
if channelregex.MatchString(arg) {
if len(args) < 2 {
return "```You have to tell me to say something, silly!```", false, nil
}
channel = arg[2 : len(arg)-1]
i++
}
if i >= len(args) {
return "```A URL is mandatory or discord won't send the embed message for some stupid reason.```", false, nil
}
url := args[i]
i++
var color uint64 = 0xFFFFFFFF
if i < len(args) {
if colorregex.MatchString(args[i]) {
if len(args) < i+2 {
return "```You have to tell me to say something, silly!```", false, nil
}
color, _ = strconv.ParseUint(args[i][2:], 16, 64)
i++
}
}
fields := make([]*discordgo.MessageEmbedField, 0, len(args)-i)
for i < len(args) {
s := strings.SplitN(args[i], ":", 2)
if len(s) < 2 {
return "```Malformed key:value pair. If your key value pair has a space in it, remember to put it in parenthesis!```", false, nil
}
fields = append(fields, &discordgo.MessageEmbedField{Name: s[0], Value: s[1], Inline: true})
i++
}
embed := &discordgo.MessageEmbed{
Type: "rich",
Author: &discordgo.MessageEmbedAuthor{
URL: url,
Name: msg.Author.Username + "#" + msg.Author.Discriminator,
IconURL: fmt.Sprintf("https://cdn.discordapp.com/avatars/%s/%s.jpg", msg.Author.ID, msg.Author.Avatar),
},
Color: int(color),
Fields: fields,
}
info.SendEmbed(channel, embed)
return "", false, nil
}
func (c *echoEmbedCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Makes Sweetie Bot assemble a rich text embed and echo it in the given channel",
Params: []CommandUsageParam{
{Name: "#channel", Desc: "The channel to echo the message in. If omitted, message is sent to this channel.", Optional: true},
{Name: "URL", Desc: "URL for the author to link to.", Optional: false},
{Name: "0xC0L0R", Desc: "Color of the embed box.", Optional: true},
{Name: "key:value", Desc: "A key:value pair of fields to display in the embed. Remember to use quotes around the *entire* key:value pair if either the key or the value have spaces.", Optional: true, Variadic: true},
},
}
}
func (c *echoEmbedCommand) UsageShort() string {
return "Makes Sweetie Bot echo a rich text embed in a given channel."
}
func SetCommandEnable(args []string, enable bool, success string, info *GuildInfo, channelID string) (string, bool, *discordgo.MessageEmbed) {
if len(args) == 0 {
return "```No module or command specified.Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
name := strings.ToLower(args[0])
for _, v := range info.modules {
if strings.ToLower(v.Name()) == name {
cmds := v.Commands()
for _, v := range cmds {
str := strings.ToLower(v.Name())
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
}
if enable {
delete(info.config.Modules.Disabled, name)
} else {
CheckMapNilBool(&info.config.Modules.Disabled)
info.config.Modules.Disabled[name] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
for _, v := range info.commands {
str := strings.ToLower(v.Name())
if str == name {
if enable {
delete(info.config.Modules.CommandDisabled, str)
} else {
CheckMapNilBool(&info.config.Modules.CommandDisabled)
info.config.Modules.CommandDisabled[str] = true
}
info.SaveConfig()
return "", false, DumpCommandsModules(channelID, info, "", "**Success!** "+args[0]+success)
}
}
return "```The " + args[0] + " module/command does not exist. Use " + info.config.Basic.CommandPrefix + "help with no arguments to list all modules and commands.```", false, nil
}
type disableCommand struct {
}
func (c *disableCommand) Name() string {
return "Disable"
}
func (c *disableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, false, " was disabled.", info, msg.ChannelID)
}
func (c *disableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Disables the given module or command, if possible. If the module/command is already disabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to disable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *disableCommand) UsageShort() string { return "Disables the given module/command, if possible." }
type enableCommand struct {
}
func (c *enableCommand) Name() string {
return "Enable"
}
func (c *enableCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return SetCommandEnable(args, true, " was enabled.", info, msg.ChannelID)
}
func (c *enableCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Enables the given module or command, if possible. If the module/command is already enabled, does nothing.",
Params: []CommandUsageParam{
{Name: "module|command", Desc: "The module or command to enable. You do not need to specify the parent module of a command, only the command name itself.", Optional: false},
},
}
}
func (c *enableCommand) UsageShort() string { return "Enables the given module/command." }
func (c *enableCommand) Roles() []string { return []string{"Princesses", "Royal Guard"} }
func (c *enableCommand) Channels() []string { return []string{} }
type updateCommand struct {
}
func (c *updateCommand) Name() string {
return "Update"
}
func (c *updateCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
/*sb.log.Log("Update command called, current PID: ", os.Getpid())
err := exec.Command("./update.sh", strconv.Itoa(os.Getpid())).Start()
if err != nil {
sb.log.Log("Command.Start() error: ", err.Error())
return "```Could not start update script!```"
}*/
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "```Shutting down for update...```")
}
}
sb.quit.set(true) // Instead of trying to call a batch script, we run the bot inside an infinite loop batch script and just shut it off when we want to update
return "```Shutting down for update...```", false, nil
}
func (c *updateCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Tells sweetiebot to shut down, calls an update script, rebuilds the code, and then restarts."}
}
func (c *updateCommand) UsageShort() string { return "[RESTRICTED] Updates sweetiebot." }
func (c *updateCommand) Roles() []string { return []string{"Princesses"} }
func (c *updateCommand) Channels() []string { return []string{} }
type dumpTablesCommand struct {
}
func (c *dumpTablesCommand) Name() string {
return "DumpTables"
}
func (c *dumpTablesCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
return "```\n" + sb.db.GetTableCounts() + "```", false, nil
}
func (c *dumpTablesCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Dumps table row counts."}
}
func (c *dumpTablesCommand) UsageShort() string { return "Dumps table row counts." }
type guildSlice []*discordgo.Guild
func (s guildSlice) Len() int {
return len(s)
}
func (s guildSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s guildSlice) Less(i, j int) bool {
if s[i].MemberCount > len(s[i].Members) {
i = s[i].MemberCount
} else {
i = len(s[i].Members)
}
if s[j].MemberCount > len(s[j].Members) {
j = s[j].MemberCount
} else {
j = len(s[j].Members)
}
return i > j
}
type listGuildsCommand struct {
}
func (c *listGuildsCommand) Name() string {
return "ListGuilds"
}
func (c *listGuildsCommand) | (args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
sb.dg.State.RLock()
guilds := append([]*discordgo.Guild{}, sb.dg.State.Guilds...)
sb.dg.State.RUnlock()
sort.Sort(guildSlice(guilds))
s := make([]string, 0, len(guilds))
private := 0
for _, v := range guilds {
if !isOwner {
sb.guildsLock.RLock()
g, ok := sb.guilds[SBatoi(v.ID)]
sb.guildsLock.RUnlock()
if ok && g.config.Basic.Importable {
s = append(s, PartialSanitize(v.Name))
} else {
private++
}
} else {
username := "<@" + v.OwnerID + ">"
if sb.db.status.get() {
m, _, _, _ := sb.db.GetUser(SBatoi(v.OwnerID))
if m != nil {
username = m.Username + "#" + m.Discriminator
}
}
count := v.MemberCount
if count < len(v.Members) {
count = len(v.Members)
}
s = append(s, PartialSanitize(fmt.Sprintf("%v (%v) - %v", v.Name, count, username)))
}
}
return fmt.Sprintf("```Sweetie has joined these servers:\n%s\n\n+ %v private servers (Basic.Importable is false)```", strings.Join(s, "\n"), private), len(s) > 8, nil
}
func (c *listGuildsCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{Desc: "Lists the servers that sweetiebot has joined."}
}
func (c *listGuildsCommand) UsageShort() string { return "Lists servers." }
type announceCommand struct {
}
func (c *announceCommand) Name() string {
return "Announce"
}
func (c *announceCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
arg := msg.Content[indices[0]:]
sb.guildsLock.RLock()
defer sb.guildsLock.RUnlock()
for _, v := range sb.guilds {
if v.config.Log.Channel > 0 {
v.SendMessage(SBitoa(v.config.Log.Channel), "<@&"+SBitoa(v.config.Basic.AlertRole)+"> "+arg)
}
}
return "", false, nil
}
func (c *announceCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that announces a message to all the log channels of all servers.",
Params: []CommandUsageParam{
{Name: "arbitrary string", Desc: "An arbitrary string for Sweetie Bot to say.", Optional: false},
},
}
}
func (c *announceCommand) UsageShort() string { return "[RESTRICTED] Announcement command." }
type removeAliasCommand struct {
}
func (c *removeAliasCommand) Name() string {
return "RemoveAlias"
}
func (c *removeAliasCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
_, isOwner := sb.Owners[SBatoi(msg.Author.ID)]
if !isOwner {
return "```Only the owner of the bot itself can call this!```", false, nil
}
if len(args) < 1 {
return "```You must PING the user you want to remove an alias from.```", false, nil
}
if len(args) < 2 {
return "```You must provide an alias to remove.```", false, nil
}
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
sb.db.RemoveAlias(PingAtoi(args[0]), msg.Content[indices[1]:])
return "```Attempted to remove the alias. Use " + info.config.Basic.CommandPrefix + "aka to check if it worked.```", false, nil
}
func (c *removeAliasCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Restricted command that removes the alias for a given user. The user must be pinged, and the alias must match precisely.",
Params: []CommandUsageParam{
{Name: "user", Desc: "A ping to a specific user in the format @User.", Optional: false},
{Name: "alias", Desc: "The *exact* name of the alias to remove.", Optional: false},
},
}
}
func (c *removeAliasCommand) UsageShort() string { return "[RESTRICTED] Removes an alias." }
type getAuditCommand struct {
}
func (c *getAuditCommand) Name() string {
return "GetAudit"
}
func (c *getAuditCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
var low uint64
var high uint64 = 10
var user *uint64
var search string
if !sb.db.CheckStatus() {
return "```A temporary database outage is preventing this command from being executed.```", false, nil
}
for i := 0; i < len(args); i++ {
if len(args[i]) > 0 {
switch args[i][0] {
case '<', '@':
if args[i][0] == '@' || (len(args[i]) > 1 && args[i][1] == '@') {
var IDs []uint64
if args[i][0] == '@' {
IDs = FindUsername(args[i][1:], info)
} else {
IDs = []uint64{SBatoi(StripPing(args[i]))}
}
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + args[i] + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info, true), "\n") + "```", len(IDs) > 5, nil
}
user = &IDs[0]
break
}
fallthrough
case '$', '!':
if args[i][0] != '!' {
search = "%"
}
if args[i][0] == '$' {
search += msg.Content[indices[i]+1:] + "%"
} else {
search += msg.Content[indices[i]:] + "%"
}
i = len(args)
default:
s := strings.SplitN(args[i], "-", 2)
if len(s) == 1 {
high = SBatoi(s[0])
} else if len(s) > 1 {
low = SBatoi(s[0]) - 1
high = SBatoi(s[1])
}
}
}
}
r := sb.db.GetAuditRows(low, high, user, search, SBatoi(info.ID))
ret := []string{"```Matching Audit Log entries:```"}
for _, v := range r {
ret = append(ret, fmt.Sprintf("[%s] %s: %s", ApplyTimezone(v.Timestamp, info, msg.Author).Format("1/2 3:04:05PM"), v.Author, v.Message))
}
return strings.Join(ret, "\n"), len(ret) > 12, nil
}
func (c *getAuditCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Allows admins to inspect the audit log.",
Params: []CommandUsageParam{
{Name: "range", Desc: "If this is a single number, the number of results to return. If it's a range in the form 999-9999, returns the given range of audit log entries, up to a maximum of 50 in one call. Defaults to displaying 1-10.", Optional: true},
{Name: "user", Desc: "Must be in the form of @user, either as an actual ping or just part of the users name. If included, filters results to just that user. If there are spaces in the username, you must use quotes.", Optional: true},
{Name: "arbitrary string", Desc: "An arbitrary string starting with either `!` or `$`. `!` will search for an exact command (regardless of what your command prefix has been set to), whereas `$` will simply search for the string anywhere in the audit log. This will eat up all remaining arguments, so put the user and the range BEFORE specifying the search string, and don't use quotes!", Optional: true},
},
}
}
func (c *getAuditCommand) UsageShort() string { return "Inspects the audit log." }
| Process | identifier_name |
plugin.go | // +build linux
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package df
import (
"bufio"
"errors"
"fmt"
"os"
"path"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/control/plugin/cpolicy"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap-plugin-utilities/config"
)
const (
// PluginName df collector plugin name
PluginName = "df"
// Version of plugin
Version = 3
nsVendor = "intel"
nsClass = "procfs"
nsType = "filesystem"
)
var (
//procPath source of data for metrics
procPath = "/proc"
// prefix in metric namespace
namespacePrefix = []string{nsVendor, nsClass, nsType}
metricsKind = []string{
"space_free",
"space_reserved",
"space_used",
"space_percent_free",
"space_percent_reserved",
"space_percent_used",
"inodes_free",
"inodes_reserved",
"inodes_used",
"inodes_percent_free",
"inodes_percent_reserved",
"inodes_percent_used",
"device_name",
"device_type",
}
invalidFSTypes = []string{
"proc",
"binfmt_misc",
"fuse.gvfsd-fuse",
"sysfs",
"cgroup",
"fusectl",
"pstore",
"debugfs",
"securityfs",
"devpts",
"mqueue",
}
)
// Function to check properness of configuration parameter
// and set plugin attribute accordingly
func (p *dfCollector) setProcPath(cfg interface{}) error {
procPath, err := config.GetConfigItem(cfg, "proc_path")
if err == nil && len(procPath.(string)) > 0 {
procPathStats, err := os.Stat(procPath.(string))
if err != nil {
return err
}
if !procPathStats.IsDir() {
return errors.New(fmt.Sprintf("%s is not a directory", procPath.(string)))
}
p.proc_path = procPath.(string)
}
return nil
}
// GetMetricTypes returns list of available metric types
// It returns error in case retrieval was not successful
func (p *dfCollector) GetMetricTypes(cfg plugin.ConfigType) ([]plugin.MetricType, error) {
mts := []plugin.MetricType{}
for _, kind := range metricsKind {
mts = append(mts, plugin.MetricType{
Namespace_: core.NewNamespace(namespacePrefix...).
AddDynamicElement("filesystem", "name of filesystem").
AddStaticElement(kind),
Description_: "dynamic filesystem metric: " + kind,
})
}
return mts, nil
}
// CollectMetrics returns list of requested metric values
// It returns error in case retrieval was not successful
func (p *dfCollector) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {
err := p.setProcPath(mts[0])
if err != nil {
return nil, err
}
metrics := []plugin.MetricType{}
curTime := time.Now()
dfms, err := p.stats.collect(p.proc_path)
if err != nil {
return metrics, fmt.Errorf(fmt.Sprintf("Unable to collect metrics from df: %s", err))
}
for _, m := range mts {
ns := m.Namespace()
lns := len(ns)
if lns < 5 {
return nil, fmt.Errorf("Wrong namespace length %d", lns)
}
if ns[lns-2].Value == "*" {
for _, dfm := range dfms {
kind := ns[lns-1].Value
ns1 := core.NewNamespace(createNamespace(dfm.MountPoint, kind)...)
ns1[len(ns1)-2].Name = ns[lns-2].Name
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns1,
}
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
} else {
for _, dfm := range dfms {
if ns[lns-2].Value == dfm.MountPoint {
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns,
}
kind := ns[lns-1].Value
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
}
}
}
return metrics, nil
}
// Function to fill metric with proper (computed) value
func fillMetric(kind string, dfm dfMetric, metric *plugin.MetricType) {
switch kind {
case "space_free":
metric.Data_ = dfm.Available
case "space_reserved":
metric.Data_ = dfm.Blocks - (dfm.Used + dfm.Available)
case "space_used":
metric.Data_ = dfm.Used
case "space_percent_free":
metric.Data_ = 100 * float64(dfm.Available) / float64(dfm.Blocks)
case "space_percent_reserved":
metric.Data_ = 100 * float64(dfm.Blocks-(dfm.Used+dfm.Available)) / float64(dfm.Blocks)
case "space_percent_used":
metric.Data_ = 100 * float64(dfm.Used) / float64(dfm.Blocks)
case "device_name":
metric.Data_ = dfm.Filesystem
case "device_type":
metric.Data_ = dfm.FsType
case "inodes_free":
metric.Data_ = dfm.IFree
case "inodes_reserved":
metric.Data_ = dfm.Inodes - (dfm.IUsed + dfm.IFree)
case "inodes_used":
metric.Data_ = dfm.IUsed
case "inodes_percent_free":
metric.Data_ = 100 * float64(dfm.IFree) / float64(dfm.Inodes)
case "inodes_percent_reserved":
metric.Data_ = 100 * float64(dfm.Inodes-(dfm.IUsed+dfm.IFree)) / float64(dfm.Inodes)
case "inodes_percent_used":
metric.Data_ = 100 * float64(dfm.IUsed) / float64(dfm.Inodes)
}
}
// createNamespace returns namespace slice of strings composed from: vendor, class, type and components of metric name
func | (elt string, name string) []string {
var suffix = []string{elt, name}
return append(namespacePrefix, suffix...)
}
// GetConfigPolicy returns config policy
// It returns error in case retrieval was not successful
func (p *dfCollector) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
cp := cpolicy.New()
rule, _ := cpolicy.NewStringRule("proc_path", false, "/proc")
node := cpolicy.NewPolicyNode()
node.Add(rule)
cp.Add([]string{nsVendor, nsClass, PluginName}, node)
return cp, nil
}
// NewDfCollector creates new instance of plugin and returns pointer to initialized object.
func NewDfCollector() *dfCollector {
logger := log.New()
return &dfCollector{
stats: &dfStats{},
logger: logger,
proc_path: procPath,
}
}
// Meta returns plugin's metadata
func Meta() *plugin.PluginMeta {
return plugin.NewPluginMeta(
PluginName,
Version,
plugin.CollectorPluginType,
[]string{plugin.SnapGOBContentType},
[]string{plugin.SnapGOBContentType},
plugin.ConcurrencyCount(1),
)
}
type dfCollector struct {
stats collector
logger *log.Logger
proc_path string
}
type dfMetric struct {
Filesystem string
Used, Available, Blocks uint64
Capacity float64
FsType string
MountPoint string
UnchangedMountPoint string
Inodes, IUsed, IFree uint64
IUse float64
}
type collector interface {
collect(string) ([]dfMetric, error)
}
type dfStats struct{}
func (dfs *dfStats) collect(procPath string) ([]dfMetric, error) {
dfms := []dfMetric{}
cpath := path.Join(procPath, "1", "mountinfo")
fh, err := os.Open(cpath)
if err != nil {
log.Error(fmt.Sprintf("Got error %#v", err))
return nil, err
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
for scanner.Scan() {
inLine := scanner.Text()
// https://www.kernel.org/doc/Documentation/filesystems/proc.txt
// or "man proc" + look for mountinfo to see meaning of fields
lParts := strings.Split(inLine, " - ")
if len(lParts) != 2 {
return nil, fmt.Errorf("Wrong format %d parts found instead of 2", len(lParts))
}
leftFields := strings.Fields(lParts[0])
if len(leftFields) != 6 && len(leftFields) != 7 {
return nil, fmt.Errorf("Wrong format %d fields found on the left side instead of 6 or 7", len(leftFields))
}
rightFields := strings.Fields(lParts[1])
if len(rightFields) != 3 {
return nil, fmt.Errorf("Wrong format %d fields found on the right side instead of 7 min", len(rightFields))
}
// Keep only meaningfull filesystems
if !invalidFS(rightFields[0]) {
var dfm dfMetric
dfm.Filesystem = rightFields[1]
dfm.FsType = rightFields[0]
dfm.UnchangedMountPoint = leftFields[4]
if leftFields[4] == "/" {
dfm.MountPoint = "rootfs"
} else {
dfm.MountPoint = strings.Replace(leftFields[4][1:], "/", "_", -1)
// Because there are mounted FS containing dots
// (like /etc/resolv.conf in Docker containers)
// and this is incompatible with Snap metric name policies
dfm.MountPoint = strings.Replace(dfm.MountPoint, ".", "_", -1)
}
stat := syscall.Statfs_t{}
err := syscall.Statfs(leftFields[4], &stat)
if err != nil {
log.Error(fmt.Sprintf("Error getting filesystem infos for %s", leftFields[4]))
continue
}
// Blocks
dfm.Blocks = (stat.Blocks * uint64(stat.Bsize)) / 1024
dfm.Available = (stat.Bavail * uint64(stat.Bsize)) / 1024
xFree := (stat.Bfree * uint64(stat.Bsize)) / 1024
dfm.Used = dfm.Blocks - xFree
percentAvailable := ceilPercent(dfm.Used, dfm.Used+dfm.Available)
dfm.Capacity = percentAvailable / 100.0
// Inodes
dfm.Inodes = stat.Files
dfm.IFree = stat.Ffree
dfm.IUsed = dfm.Inodes - dfm.IFree
percentIUsed := ceilPercent(dfm.IUsed, dfm.Inodes)
dfm.IUse = percentIUsed / 100.0
dfms = append(dfms, dfm)
}
}
return dfms, nil
}
// Return true if filesystem should not be taken into account
func invalidFS(fs string) bool {
for _, v := range invalidFSTypes {
if fs == v {
return true
}
}
return false
}
// Ceiling function preventing addition of math library
func ceilPercent(v uint64, t uint64) float64 {
// Prevent division by 0 to occur
if t == 0 {
return 0.0
}
var v1i uint64
v1i = v * 100 / t
var v1f float64
v1f = float64(v) * 100.0 / float64(t)
var v2f float64
v2f = float64(v1i)
if v2f-1 < v1f && v1f <= v2f+1 {
addF := 0.0
if v2f < v1f {
addF = 1.0
}
v1f = v2f + addF
}
return v1f
}
func makeNamespace(dfm dfMetric, kind string) []string {
ns := []string{}
ns = append(ns, namespacePrefix...)
ns = append(ns, dfm.MountPoint, kind)
return ns
}
// validate if metric should be exposed
func validateMetric(namespace []string, dfm dfMetric) bool {
mountPoint := namespace[0]
if mountPoint == dfm.MountPoint {
return true
}
return false
}
| createNamespace | identifier_name |
plugin.go | // +build linux
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package df
import (
"bufio"
"errors"
"fmt"
"os"
"path"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/control/plugin/cpolicy"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap-plugin-utilities/config"
)
const (
// PluginName df collector plugin name
PluginName = "df"
// Version of plugin
Version = 3
nsVendor = "intel"
nsClass = "procfs"
nsType = "filesystem"
)
var (
//procPath source of data for metrics
procPath = "/proc"
// prefix in metric namespace
namespacePrefix = []string{nsVendor, nsClass, nsType}
metricsKind = []string{
"space_free",
"space_reserved",
"space_used",
"space_percent_free",
"space_percent_reserved",
"space_percent_used",
"inodes_free",
"inodes_reserved",
"inodes_used",
"inodes_percent_free",
"inodes_percent_reserved",
"inodes_percent_used",
"device_name",
"device_type",
}
invalidFSTypes = []string{
"proc",
"binfmt_misc",
"fuse.gvfsd-fuse",
"sysfs",
"cgroup",
"fusectl",
"pstore",
"debugfs",
"securityfs",
"devpts",
"mqueue",
}
)
// Function to check properness of configuration parameter
// and set plugin attribute accordingly
func (p *dfCollector) setProcPath(cfg interface{}) error {
procPath, err := config.GetConfigItem(cfg, "proc_path")
if err == nil && len(procPath.(string)) > 0 |
return nil
}
// GetMetricTypes returns list of available metric types
// It returns error in case retrieval was not successful
func (p *dfCollector) GetMetricTypes(cfg plugin.ConfigType) ([]plugin.MetricType, error) {
mts := []plugin.MetricType{}
for _, kind := range metricsKind {
mts = append(mts, plugin.MetricType{
Namespace_: core.NewNamespace(namespacePrefix...).
AddDynamicElement("filesystem", "name of filesystem").
AddStaticElement(kind),
Description_: "dynamic filesystem metric: " + kind,
})
}
return mts, nil
}
// CollectMetrics returns list of requested metric values
// It returns error in case retrieval was not successful
func (p *dfCollector) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {
err := p.setProcPath(mts[0])
if err != nil {
return nil, err
}
metrics := []plugin.MetricType{}
curTime := time.Now()
dfms, err := p.stats.collect(p.proc_path)
if err != nil {
return metrics, fmt.Errorf(fmt.Sprintf("Unable to collect metrics from df: %s", err))
}
for _, m := range mts {
ns := m.Namespace()
lns := len(ns)
if lns < 5 {
return nil, fmt.Errorf("Wrong namespace length %d", lns)
}
if ns[lns-2].Value == "*" {
for _, dfm := range dfms {
kind := ns[lns-1].Value
ns1 := core.NewNamespace(createNamespace(dfm.MountPoint, kind)...)
ns1[len(ns1)-2].Name = ns[lns-2].Name
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns1,
}
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
} else {
for _, dfm := range dfms {
if ns[lns-2].Value == dfm.MountPoint {
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns,
}
kind := ns[lns-1].Value
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
}
}
}
return metrics, nil
}
// Function to fill metric with proper (computed) value
func fillMetric(kind string, dfm dfMetric, metric *plugin.MetricType) {
switch kind {
case "space_free":
metric.Data_ = dfm.Available
case "space_reserved":
metric.Data_ = dfm.Blocks - (dfm.Used + dfm.Available)
case "space_used":
metric.Data_ = dfm.Used
case "space_percent_free":
metric.Data_ = 100 * float64(dfm.Available) / float64(dfm.Blocks)
case "space_percent_reserved":
metric.Data_ = 100 * float64(dfm.Blocks-(dfm.Used+dfm.Available)) / float64(dfm.Blocks)
case "space_percent_used":
metric.Data_ = 100 * float64(dfm.Used) / float64(dfm.Blocks)
case "device_name":
metric.Data_ = dfm.Filesystem
case "device_type":
metric.Data_ = dfm.FsType
case "inodes_free":
metric.Data_ = dfm.IFree
case "inodes_reserved":
metric.Data_ = dfm.Inodes - (dfm.IUsed + dfm.IFree)
case "inodes_used":
metric.Data_ = dfm.IUsed
case "inodes_percent_free":
metric.Data_ = 100 * float64(dfm.IFree) / float64(dfm.Inodes)
case "inodes_percent_reserved":
metric.Data_ = 100 * float64(dfm.Inodes-(dfm.IUsed+dfm.IFree)) / float64(dfm.Inodes)
case "inodes_percent_used":
metric.Data_ = 100 * float64(dfm.IUsed) / float64(dfm.Inodes)
}
}
// createNamespace returns namespace slice of strings composed from: vendor, class, type and components of metric name
func createNamespace(elt string, name string) []string {
var suffix = []string{elt, name}
return append(namespacePrefix, suffix...)
}
// GetConfigPolicy returns config policy
// It returns error in case retrieval was not successful
func (p *dfCollector) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
cp := cpolicy.New()
rule, _ := cpolicy.NewStringRule("proc_path", false, "/proc")
node := cpolicy.NewPolicyNode()
node.Add(rule)
cp.Add([]string{nsVendor, nsClass, PluginName}, node)
return cp, nil
}
// NewDfCollector creates new instance of plugin and returns pointer to initialized object.
func NewDfCollector() *dfCollector {
logger := log.New()
return &dfCollector{
stats: &dfStats{},
logger: logger,
proc_path: procPath,
}
}
// Meta returns plugin's metadata
func Meta() *plugin.PluginMeta {
return plugin.NewPluginMeta(
PluginName,
Version,
plugin.CollectorPluginType,
[]string{plugin.SnapGOBContentType},
[]string{plugin.SnapGOBContentType},
plugin.ConcurrencyCount(1),
)
}
type dfCollector struct {
stats collector
logger *log.Logger
proc_path string
}
type dfMetric struct {
Filesystem string
Used, Available, Blocks uint64
Capacity float64
FsType string
MountPoint string
UnchangedMountPoint string
Inodes, IUsed, IFree uint64
IUse float64
}
type collector interface {
collect(string) ([]dfMetric, error)
}
type dfStats struct{}
func (dfs *dfStats) collect(procPath string) ([]dfMetric, error) {
dfms := []dfMetric{}
cpath := path.Join(procPath, "1", "mountinfo")
fh, err := os.Open(cpath)
if err != nil {
log.Error(fmt.Sprintf("Got error %#v", err))
return nil, err
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
for scanner.Scan() {
inLine := scanner.Text()
// https://www.kernel.org/doc/Documentation/filesystems/proc.txt
// or "man proc" + look for mountinfo to see meaning of fields
lParts := strings.Split(inLine, " - ")
if len(lParts) != 2 {
return nil, fmt.Errorf("Wrong format %d parts found instead of 2", len(lParts))
}
leftFields := strings.Fields(lParts[0])
if len(leftFields) != 6 && len(leftFields) != 7 {
return nil, fmt.Errorf("Wrong format %d fields found on the left side instead of 6 or 7", len(leftFields))
}
rightFields := strings.Fields(lParts[1])
if len(rightFields) != 3 {
return nil, fmt.Errorf("Wrong format %d fields found on the right side instead of 7 min", len(rightFields))
}
// Keep only meaningfull filesystems
if !invalidFS(rightFields[0]) {
var dfm dfMetric
dfm.Filesystem = rightFields[1]
dfm.FsType = rightFields[0]
dfm.UnchangedMountPoint = leftFields[4]
if leftFields[4] == "/" {
dfm.MountPoint = "rootfs"
} else {
dfm.MountPoint = strings.Replace(leftFields[4][1:], "/", "_", -1)
// Because there are mounted FS containing dots
// (like /etc/resolv.conf in Docker containers)
// and this is incompatible with Snap metric name policies
dfm.MountPoint = strings.Replace(dfm.MountPoint, ".", "_", -1)
}
stat := syscall.Statfs_t{}
err := syscall.Statfs(leftFields[4], &stat)
if err != nil {
log.Error(fmt.Sprintf("Error getting filesystem infos for %s", leftFields[4]))
continue
}
// Blocks
dfm.Blocks = (stat.Blocks * uint64(stat.Bsize)) / 1024
dfm.Available = (stat.Bavail * uint64(stat.Bsize)) / 1024
xFree := (stat.Bfree * uint64(stat.Bsize)) / 1024
dfm.Used = dfm.Blocks - xFree
percentAvailable := ceilPercent(dfm.Used, dfm.Used+dfm.Available)
dfm.Capacity = percentAvailable / 100.0
// Inodes
dfm.Inodes = stat.Files
dfm.IFree = stat.Ffree
dfm.IUsed = dfm.Inodes - dfm.IFree
percentIUsed := ceilPercent(dfm.IUsed, dfm.Inodes)
dfm.IUse = percentIUsed / 100.0
dfms = append(dfms, dfm)
}
}
return dfms, nil
}
// Return true if filesystem should not be taken into account
func invalidFS(fs string) bool {
for _, v := range invalidFSTypes {
if fs == v {
return true
}
}
return false
}
// Ceiling function preventing addition of math library
func ceilPercent(v uint64, t uint64) float64 {
// Prevent division by 0 to occur
if t == 0 {
return 0.0
}
var v1i uint64
v1i = v * 100 / t
var v1f float64
v1f = float64(v) * 100.0 / float64(t)
var v2f float64
v2f = float64(v1i)
if v2f-1 < v1f && v1f <= v2f+1 {
addF := 0.0
if v2f < v1f {
addF = 1.0
}
v1f = v2f + addF
}
return v1f
}
func makeNamespace(dfm dfMetric, kind string) []string {
ns := []string{}
ns = append(ns, namespacePrefix...)
ns = append(ns, dfm.MountPoint, kind)
return ns
}
// validate if metric should be exposed
func validateMetric(namespace []string, dfm dfMetric) bool {
mountPoint := namespace[0]
if mountPoint == dfm.MountPoint {
return true
}
return false
}
| {
procPathStats, err := os.Stat(procPath.(string))
if err != nil {
return err
}
if !procPathStats.IsDir() {
return errors.New(fmt.Sprintf("%s is not a directory", procPath.(string)))
}
p.proc_path = procPath.(string)
} | conditional_block |
plugin.go | // +build linux
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package df
import (
"bufio"
"errors"
"fmt"
"os"
"path"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/control/plugin/cpolicy"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap-plugin-utilities/config"
)
const (
// PluginName df collector plugin name
PluginName = "df"
// Version of plugin
Version = 3
nsVendor = "intel"
nsClass = "procfs"
nsType = "filesystem"
)
var (
//procPath source of data for metrics
procPath = "/proc"
// prefix in metric namespace
namespacePrefix = []string{nsVendor, nsClass, nsType}
metricsKind = []string{
"space_free",
"space_reserved",
"space_used",
"space_percent_free",
"space_percent_reserved",
"space_percent_used",
"inodes_free",
"inodes_reserved",
"inodes_used",
"inodes_percent_free",
"inodes_percent_reserved",
"inodes_percent_used",
"device_name",
"device_type",
}
invalidFSTypes = []string{
"proc",
"binfmt_misc",
"fuse.gvfsd-fuse",
"sysfs",
"cgroup",
"fusectl",
"pstore",
"debugfs",
"securityfs",
"devpts",
"mqueue",
}
)
// Function to check properness of configuration parameter
// and set plugin attribute accordingly
func (p *dfCollector) setProcPath(cfg interface{}) error {
procPath, err := config.GetConfigItem(cfg, "proc_path")
if err == nil && len(procPath.(string)) > 0 {
procPathStats, err := os.Stat(procPath.(string))
if err != nil {
return err
}
if !procPathStats.IsDir() {
return errors.New(fmt.Sprintf("%s is not a directory", procPath.(string)))
}
p.proc_path = procPath.(string)
}
return nil | // GetMetricTypes returns list of available metric types
// It returns error in case retrieval was not successful
func (p *dfCollector) GetMetricTypes(cfg plugin.ConfigType) ([]plugin.MetricType, error) {
mts := []plugin.MetricType{}
for _, kind := range metricsKind {
mts = append(mts, plugin.MetricType{
Namespace_: core.NewNamespace(namespacePrefix...).
AddDynamicElement("filesystem", "name of filesystem").
AddStaticElement(kind),
Description_: "dynamic filesystem metric: " + kind,
})
}
return mts, nil
}
// CollectMetrics returns list of requested metric values
// It returns error in case retrieval was not successful
func (p *dfCollector) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {
err := p.setProcPath(mts[0])
if err != nil {
return nil, err
}
metrics := []plugin.MetricType{}
curTime := time.Now()
dfms, err := p.stats.collect(p.proc_path)
if err != nil {
return metrics, fmt.Errorf(fmt.Sprintf("Unable to collect metrics from df: %s", err))
}
for _, m := range mts {
ns := m.Namespace()
lns := len(ns)
if lns < 5 {
return nil, fmt.Errorf("Wrong namespace length %d", lns)
}
if ns[lns-2].Value == "*" {
for _, dfm := range dfms {
kind := ns[lns-1].Value
ns1 := core.NewNamespace(createNamespace(dfm.MountPoint, kind)...)
ns1[len(ns1)-2].Name = ns[lns-2].Name
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns1,
}
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
} else {
for _, dfm := range dfms {
if ns[lns-2].Value == dfm.MountPoint {
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns,
}
kind := ns[lns-1].Value
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
}
}
}
return metrics, nil
}
// Function to fill metric with proper (computed) value
func fillMetric(kind string, dfm dfMetric, metric *plugin.MetricType) {
switch kind {
case "space_free":
metric.Data_ = dfm.Available
case "space_reserved":
metric.Data_ = dfm.Blocks - (dfm.Used + dfm.Available)
case "space_used":
metric.Data_ = dfm.Used
case "space_percent_free":
metric.Data_ = 100 * float64(dfm.Available) / float64(dfm.Blocks)
case "space_percent_reserved":
metric.Data_ = 100 * float64(dfm.Blocks-(dfm.Used+dfm.Available)) / float64(dfm.Blocks)
case "space_percent_used":
metric.Data_ = 100 * float64(dfm.Used) / float64(dfm.Blocks)
case "device_name":
metric.Data_ = dfm.Filesystem
case "device_type":
metric.Data_ = dfm.FsType
case "inodes_free":
metric.Data_ = dfm.IFree
case "inodes_reserved":
metric.Data_ = dfm.Inodes - (dfm.IUsed + dfm.IFree)
case "inodes_used":
metric.Data_ = dfm.IUsed
case "inodes_percent_free":
metric.Data_ = 100 * float64(dfm.IFree) / float64(dfm.Inodes)
case "inodes_percent_reserved":
metric.Data_ = 100 * float64(dfm.Inodes-(dfm.IUsed+dfm.IFree)) / float64(dfm.Inodes)
case "inodes_percent_used":
metric.Data_ = 100 * float64(dfm.IUsed) / float64(dfm.Inodes)
}
}
// createNamespace returns namespace slice of strings composed from: vendor, class, type and components of metric name
func createNamespace(elt string, name string) []string {
var suffix = []string{elt, name}
return append(namespacePrefix, suffix...)
}
// GetConfigPolicy returns config policy
// It returns error in case retrieval was not successful
func (p *dfCollector) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
cp := cpolicy.New()
rule, _ := cpolicy.NewStringRule("proc_path", false, "/proc")
node := cpolicy.NewPolicyNode()
node.Add(rule)
cp.Add([]string{nsVendor, nsClass, PluginName}, node)
return cp, nil
}
// NewDfCollector creates new instance of plugin and returns pointer to initialized object.
func NewDfCollector() *dfCollector {
logger := log.New()
return &dfCollector{
stats: &dfStats{},
logger: logger,
proc_path: procPath,
}
}
// Meta returns plugin's metadata
func Meta() *plugin.PluginMeta {
return plugin.NewPluginMeta(
PluginName,
Version,
plugin.CollectorPluginType,
[]string{plugin.SnapGOBContentType},
[]string{plugin.SnapGOBContentType},
plugin.ConcurrencyCount(1),
)
}
type dfCollector struct {
stats collector
logger *log.Logger
proc_path string
}
type dfMetric struct {
Filesystem string
Used, Available, Blocks uint64
Capacity float64
FsType string
MountPoint string
UnchangedMountPoint string
Inodes, IUsed, IFree uint64
IUse float64
}
type collector interface {
collect(string) ([]dfMetric, error)
}
type dfStats struct{}
func (dfs *dfStats) collect(procPath string) ([]dfMetric, error) {
dfms := []dfMetric{}
cpath := path.Join(procPath, "1", "mountinfo")
fh, err := os.Open(cpath)
if err != nil {
log.Error(fmt.Sprintf("Got error %#v", err))
return nil, err
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
for scanner.Scan() {
inLine := scanner.Text()
// https://www.kernel.org/doc/Documentation/filesystems/proc.txt
// or "man proc" + look for mountinfo to see meaning of fields
lParts := strings.Split(inLine, " - ")
if len(lParts) != 2 {
return nil, fmt.Errorf("Wrong format %d parts found instead of 2", len(lParts))
}
leftFields := strings.Fields(lParts[0])
if len(leftFields) != 6 && len(leftFields) != 7 {
return nil, fmt.Errorf("Wrong format %d fields found on the left side instead of 6 or 7", len(leftFields))
}
rightFields := strings.Fields(lParts[1])
if len(rightFields) != 3 {
return nil, fmt.Errorf("Wrong format %d fields found on the right side instead of 7 min", len(rightFields))
}
// Keep only meaningfull filesystems
if !invalidFS(rightFields[0]) {
var dfm dfMetric
dfm.Filesystem = rightFields[1]
dfm.FsType = rightFields[0]
dfm.UnchangedMountPoint = leftFields[4]
if leftFields[4] == "/" {
dfm.MountPoint = "rootfs"
} else {
dfm.MountPoint = strings.Replace(leftFields[4][1:], "/", "_", -1)
// Because there are mounted FS containing dots
// (like /etc/resolv.conf in Docker containers)
// and this is incompatible with Snap metric name policies
dfm.MountPoint = strings.Replace(dfm.MountPoint, ".", "_", -1)
}
stat := syscall.Statfs_t{}
err := syscall.Statfs(leftFields[4], &stat)
if err != nil {
log.Error(fmt.Sprintf("Error getting filesystem infos for %s", leftFields[4]))
continue
}
// Blocks
dfm.Blocks = (stat.Blocks * uint64(stat.Bsize)) / 1024
dfm.Available = (stat.Bavail * uint64(stat.Bsize)) / 1024
xFree := (stat.Bfree * uint64(stat.Bsize)) / 1024
dfm.Used = dfm.Blocks - xFree
percentAvailable := ceilPercent(dfm.Used, dfm.Used+dfm.Available)
dfm.Capacity = percentAvailable / 100.0
// Inodes
dfm.Inodes = stat.Files
dfm.IFree = stat.Ffree
dfm.IUsed = dfm.Inodes - dfm.IFree
percentIUsed := ceilPercent(dfm.IUsed, dfm.Inodes)
dfm.IUse = percentIUsed / 100.0
dfms = append(dfms, dfm)
}
}
return dfms, nil
}
// Return true if filesystem should not be taken into account
func invalidFS(fs string) bool {
for _, v := range invalidFSTypes {
if fs == v {
return true
}
}
return false
}
// Ceiling function preventing addition of math library
func ceilPercent(v uint64, t uint64) float64 {
// Prevent division by 0 to occur
if t == 0 {
return 0.0
}
var v1i uint64
v1i = v * 100 / t
var v1f float64
v1f = float64(v) * 100.0 / float64(t)
var v2f float64
v2f = float64(v1i)
if v2f-1 < v1f && v1f <= v2f+1 {
addF := 0.0
if v2f < v1f {
addF = 1.0
}
v1f = v2f + addF
}
return v1f
}
func makeNamespace(dfm dfMetric, kind string) []string {
ns := []string{}
ns = append(ns, namespacePrefix...)
ns = append(ns, dfm.MountPoint, kind)
return ns
}
// validate if metric should be exposed
func validateMetric(namespace []string, dfm dfMetric) bool {
mountPoint := namespace[0]
if mountPoint == dfm.MountPoint {
return true
}
return false
} | }
| random_line_split |
plugin.go | // +build linux
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package df
import (
"bufio"
"errors"
"fmt"
"os"
"path"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/intelsdi-x/snap/control/plugin"
"github.com/intelsdi-x/snap/control/plugin/cpolicy"
"github.com/intelsdi-x/snap/core"
"github.com/intelsdi-x/snap-plugin-utilities/config"
)
const (
// PluginName df collector plugin name
PluginName = "df"
// Version of plugin
Version = 3
nsVendor = "intel"
nsClass = "procfs"
nsType = "filesystem"
)
var (
//procPath source of data for metrics
procPath = "/proc"
// prefix in metric namespace
namespacePrefix = []string{nsVendor, nsClass, nsType}
metricsKind = []string{
"space_free",
"space_reserved",
"space_used",
"space_percent_free",
"space_percent_reserved",
"space_percent_used",
"inodes_free",
"inodes_reserved",
"inodes_used",
"inodes_percent_free",
"inodes_percent_reserved",
"inodes_percent_used",
"device_name",
"device_type",
}
invalidFSTypes = []string{
"proc",
"binfmt_misc",
"fuse.gvfsd-fuse",
"sysfs",
"cgroup",
"fusectl",
"pstore",
"debugfs",
"securityfs",
"devpts",
"mqueue",
}
)
// Function to check properness of configuration parameter
// and set plugin attribute accordingly
func (p *dfCollector) setProcPath(cfg interface{}) error {
procPath, err := config.GetConfigItem(cfg, "proc_path")
if err == nil && len(procPath.(string)) > 0 {
procPathStats, err := os.Stat(procPath.(string))
if err != nil {
return err
}
if !procPathStats.IsDir() {
return errors.New(fmt.Sprintf("%s is not a directory", procPath.(string)))
}
p.proc_path = procPath.(string)
}
return nil
}
// GetMetricTypes returns list of available metric types
// It returns error in case retrieval was not successful
func (p *dfCollector) GetMetricTypes(cfg plugin.ConfigType) ([]plugin.MetricType, error) {
mts := []plugin.MetricType{}
for _, kind := range metricsKind {
mts = append(mts, plugin.MetricType{
Namespace_: core.NewNamespace(namespacePrefix...).
AddDynamicElement("filesystem", "name of filesystem").
AddStaticElement(kind),
Description_: "dynamic filesystem metric: " + kind,
})
}
return mts, nil
}
// CollectMetrics returns list of requested metric values
// It returns error in case retrieval was not successful
func (p *dfCollector) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {
err := p.setProcPath(mts[0])
if err != nil {
return nil, err
}
metrics := []plugin.MetricType{}
curTime := time.Now()
dfms, err := p.stats.collect(p.proc_path)
if err != nil {
return metrics, fmt.Errorf(fmt.Sprintf("Unable to collect metrics from df: %s", err))
}
for _, m := range mts {
ns := m.Namespace()
lns := len(ns)
if lns < 5 {
return nil, fmt.Errorf("Wrong namespace length %d", lns)
}
if ns[lns-2].Value == "*" {
for _, dfm := range dfms {
kind := ns[lns-1].Value
ns1 := core.NewNamespace(createNamespace(dfm.MountPoint, kind)...)
ns1[len(ns1)-2].Name = ns[lns-2].Name
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns1,
}
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
} else {
for _, dfm := range dfms {
if ns[lns-2].Value == dfm.MountPoint {
metric := plugin.MetricType{
Timestamp_: curTime,
Namespace_: ns,
}
kind := ns[lns-1].Value
fillMetric(kind, dfm, &metric)
metrics = append(metrics, metric)
}
}
}
}
return metrics, nil
}
// Function to fill metric with proper (computed) value
func fillMetric(kind string, dfm dfMetric, metric *plugin.MetricType) {
switch kind {
case "space_free":
metric.Data_ = dfm.Available
case "space_reserved":
metric.Data_ = dfm.Blocks - (dfm.Used + dfm.Available)
case "space_used":
metric.Data_ = dfm.Used
case "space_percent_free":
metric.Data_ = 100 * float64(dfm.Available) / float64(dfm.Blocks)
case "space_percent_reserved":
metric.Data_ = 100 * float64(dfm.Blocks-(dfm.Used+dfm.Available)) / float64(dfm.Blocks)
case "space_percent_used":
metric.Data_ = 100 * float64(dfm.Used) / float64(dfm.Blocks)
case "device_name":
metric.Data_ = dfm.Filesystem
case "device_type":
metric.Data_ = dfm.FsType
case "inodes_free":
metric.Data_ = dfm.IFree
case "inodes_reserved":
metric.Data_ = dfm.Inodes - (dfm.IUsed + dfm.IFree)
case "inodes_used":
metric.Data_ = dfm.IUsed
case "inodes_percent_free":
metric.Data_ = 100 * float64(dfm.IFree) / float64(dfm.Inodes)
case "inodes_percent_reserved":
metric.Data_ = 100 * float64(dfm.Inodes-(dfm.IUsed+dfm.IFree)) / float64(dfm.Inodes)
case "inodes_percent_used":
metric.Data_ = 100 * float64(dfm.IUsed) / float64(dfm.Inodes)
}
}
// createNamespace returns namespace slice of strings composed from: vendor, class, type and components of metric name
func createNamespace(elt string, name string) []string |
// GetConfigPolicy returns config policy
// It returns error in case retrieval was not successful
func (p *dfCollector) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
cp := cpolicy.New()
rule, _ := cpolicy.NewStringRule("proc_path", false, "/proc")
node := cpolicy.NewPolicyNode()
node.Add(rule)
cp.Add([]string{nsVendor, nsClass, PluginName}, node)
return cp, nil
}
// NewDfCollector creates new instance of plugin and returns pointer to initialized object.
func NewDfCollector() *dfCollector {
logger := log.New()
return &dfCollector{
stats: &dfStats{},
logger: logger,
proc_path: procPath,
}
}
// Meta returns plugin's metadata
func Meta() *plugin.PluginMeta {
return plugin.NewPluginMeta(
PluginName,
Version,
plugin.CollectorPluginType,
[]string{plugin.SnapGOBContentType},
[]string{plugin.SnapGOBContentType},
plugin.ConcurrencyCount(1),
)
}
type dfCollector struct {
stats collector
logger *log.Logger
proc_path string
}
type dfMetric struct {
Filesystem string
Used, Available, Blocks uint64
Capacity float64
FsType string
MountPoint string
UnchangedMountPoint string
Inodes, IUsed, IFree uint64
IUse float64
}
type collector interface {
collect(string) ([]dfMetric, error)
}
type dfStats struct{}
func (dfs *dfStats) collect(procPath string) ([]dfMetric, error) {
dfms := []dfMetric{}
cpath := path.Join(procPath, "1", "mountinfo")
fh, err := os.Open(cpath)
if err != nil {
log.Error(fmt.Sprintf("Got error %#v", err))
return nil, err
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
for scanner.Scan() {
inLine := scanner.Text()
// https://www.kernel.org/doc/Documentation/filesystems/proc.txt
// or "man proc" + look for mountinfo to see meaning of fields
lParts := strings.Split(inLine, " - ")
if len(lParts) != 2 {
return nil, fmt.Errorf("Wrong format %d parts found instead of 2", len(lParts))
}
leftFields := strings.Fields(lParts[0])
if len(leftFields) != 6 && len(leftFields) != 7 {
return nil, fmt.Errorf("Wrong format %d fields found on the left side instead of 6 or 7", len(leftFields))
}
rightFields := strings.Fields(lParts[1])
if len(rightFields) != 3 {
return nil, fmt.Errorf("Wrong format %d fields found on the right side instead of 7 min", len(rightFields))
}
// Keep only meaningfull filesystems
if !invalidFS(rightFields[0]) {
var dfm dfMetric
dfm.Filesystem = rightFields[1]
dfm.FsType = rightFields[0]
dfm.UnchangedMountPoint = leftFields[4]
if leftFields[4] == "/" {
dfm.MountPoint = "rootfs"
} else {
dfm.MountPoint = strings.Replace(leftFields[4][1:], "/", "_", -1)
// Because there are mounted FS containing dots
// (like /etc/resolv.conf in Docker containers)
// and this is incompatible with Snap metric name policies
dfm.MountPoint = strings.Replace(dfm.MountPoint, ".", "_", -1)
}
stat := syscall.Statfs_t{}
err := syscall.Statfs(leftFields[4], &stat)
if err != nil {
log.Error(fmt.Sprintf("Error getting filesystem infos for %s", leftFields[4]))
continue
}
// Blocks
dfm.Blocks = (stat.Blocks * uint64(stat.Bsize)) / 1024
dfm.Available = (stat.Bavail * uint64(stat.Bsize)) / 1024
xFree := (stat.Bfree * uint64(stat.Bsize)) / 1024
dfm.Used = dfm.Blocks - xFree
percentAvailable := ceilPercent(dfm.Used, dfm.Used+dfm.Available)
dfm.Capacity = percentAvailable / 100.0
// Inodes
dfm.Inodes = stat.Files
dfm.IFree = stat.Ffree
dfm.IUsed = dfm.Inodes - dfm.IFree
percentIUsed := ceilPercent(dfm.IUsed, dfm.Inodes)
dfm.IUse = percentIUsed / 100.0
dfms = append(dfms, dfm)
}
}
return dfms, nil
}
// Return true if filesystem should not be taken into account
func invalidFS(fs string) bool {
for _, v := range invalidFSTypes {
if fs == v {
return true
}
}
return false
}
// Ceiling function preventing addition of math library
func ceilPercent(v uint64, t uint64) float64 {
// Prevent division by 0 to occur
if t == 0 {
return 0.0
}
var v1i uint64
v1i = v * 100 / t
var v1f float64
v1f = float64(v) * 100.0 / float64(t)
var v2f float64
v2f = float64(v1i)
if v2f-1 < v1f && v1f <= v2f+1 {
addF := 0.0
if v2f < v1f {
addF = 1.0
}
v1f = v2f + addF
}
return v1f
}
func makeNamespace(dfm dfMetric, kind string) []string {
ns := []string{}
ns = append(ns, namespacePrefix...)
ns = append(ns, dfm.MountPoint, kind)
return ns
}
// validate if metric should be exposed
func validateMetric(namespace []string, dfm dfMetric) bool {
mountPoint := namespace[0]
if mountPoint == dfm.MountPoint {
return true
}
return false
}
| {
var suffix = []string{elt, name}
return append(namespacePrefix, suffix...)
} | identifier_body |
aes.rs | //! Interface to the AES peripheral.
//!
//! Note that the AES peripheral is only available on some MCUs in the L0/L1/L2
//! families. Check the datasheet for more information.
//!
//! See STM32L0x2 reference manual, chapter 18.
use core::{
convert::TryInto,
ops::{Deref, DerefMut},
pin::Pin,
};
use as_slice::{AsMutSlice, AsSlice};
use nb::block;
use void::Void;
use crate::{
dma,
pac::{
self,
aes::{self, cr},
},
rcc::{Enable, Rcc, Reset},
};
/// Entry point to the AES API
pub struct AES {
aes: pac::AES,
}
impl AES {
/// Initialize the AES peripheral
pub fn new(aes: pac::AES, rcc: &mut Rcc) -> Self {
// Enable peripheral clock
pac::AES::enable(rcc);
// Reset peripheral
pac::AES::reset(rcc);
// Configure peripheral
aes.cr.write(|w| {
// Enable DMA
w.dmaouten().set_bit();
w.dmainen().set_bit();
// Disable interrupts
w.errie().clear_bit();
w.ccfie().clear_bit()
});
Self { aes }
}
/// Enable the AES peripheral
///
/// Returns a [`Stream`] instance which can be used to encrypt or decrypt
/// data using the mode selected with the `mode` argument.
///
/// Consumes the `AES` instance. You can get it back later once you're done
/// with the `Stream`, using [`Stream::disable`].
pub fn enable<M>(self, mode: M, key: [u32; 4]) -> Stream
where
M: Mode,
{
// Write key. This is safe, as the register accepts the full range of
// `u32`.
self.aes.keyr0.write(|w| w.bits(key[0]));
self.aes.keyr1.write(|w| w.bits(key[1]));
self.aes.keyr2.write(|w| w.bits(key[2]));
self.aes.keyr3.write(|w| w.bits(key[3]));
mode.prepare(&self.aes);
self.aes.cr.modify(|_, w| {
// Select mode
mode.select(w);
// Configure for stream of bytes
// Safe, as we write a valid byte pattern.
w.datatype().bits(0b10);
// Enable peripheral
w.en().set_bit()
});
Stream {
aes: self,
rx: Rx(()),
tx: Tx(()),
}
}
}
/// An active encryption/decryption stream
///
/// You can get an instance of this struct by calling [`AES::enable`].
pub struct Stream {
aes: AES,
/// Can be used to write data to the AES peripheral
pub tx: Tx,
/// Can be used to read data from the AES peripheral
pub rx: Rx,
}
impl Stream {
/// Processes one block of data
///
/// Writes one block of data to the AES peripheral, wait until it is
/// processed then reads the processed block and returns it.
///
/// Whether this is encryption or decryption depends on the mode that was
/// selected when this `Stream` was created.
pub fn process(&mut self, input: &Block) -> Result<Block, Error> {
self.tx.write(input)?;
// Can't panic. Error value of `Rx::read` is `Void`.
let output = block!(self.rx.read()).unwrap();
Ok(output)
}
/// Disable the AES peripheral
///
/// Consumes the stream and returns the disabled [`AES`] instance. Call this
/// method when you're done encrypting/decrypting data. You can then create
/// another `Stream` using [`AES::enable`].
pub fn disable(self) -> AES {
// Disable AES
self.aes.aes.cr.modify(|_, w| w.en().clear_bit());
self.aes
}
}
/// Can be used to write data to the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Tx(());
impl Tx {
/// Write a block to the AES peripheral
///
/// Please note that only one block can be written before you need to read
/// the processed block back using [`Read::read`]. Calling this method
/// multiple times without calling [`Read::read`] in between will result in
/// an error to be returned.
pub fn write(&mut self, block: &Block) -> Result<(), Error> {
// Get access to the registers. This is safe, because:
// - `Tx` has exclusive access to DINR.
// - We only use SR for an atomic read.
let (dinr, sr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.dinr, &aes.sr)
};
// Write input data to DINR
//
// See STM32L0x2 reference manual, section 18.4.10.
for i in (0..4).rev() {
dinr.write(|w| {
let i = i * 4;
let word = &block[i..i + 4];
// Can't panic, because `word` is 4 bytes long.
let word = word.try_into().unwrap();
let word = u32::from_le_bytes(word);
w.bits(word)
});
}
// Was there an unexpected write? If so, a computation is already
// ongoing and the user needs to call `Rx::read` next. If I understand
// the documentation correctly, our writes to the register above
// shouldn't have affected the ongoing computation.
if sr.read().wrerr().bit_is_set() {
return Err(Error::Busy);
}
Ok(())
}
/// Writes the provided buffer to the AES peripheral using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
pub fn | <Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.dinr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be read from.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be lower than the priority of the
// transfer created in `read_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::high(),
dma::Direction::memory_to_peripheral(),
)
}
}
}
/// Can be used to read data from the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Rx(());
impl Rx {
pub fn read(&mut self) -> nb::Result<Block, Void> {
// Get access to the registers. This is safe, because:
// - We only use SR for an atomic read.
// - `Rx` has exclusive access to DOUTR.
// - While it exists, `Rx` has exlusive access to CR.
let (sr, doutr, cr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.sr, &aes.doutr, &aes.cr)
};
// Is a computation complete?
if sr.read().ccf().bit_is_clear() {
return Err(nb::Error::WouldBlock);
}
// Read output data from DOUTR
//
// See STM32L0x2 reference manual, section 18.4.10.
let mut block = [0; 16];
for i in (0..4).rev() {
let i = i * 4;
let word = doutr.read().bits();
let word = word.to_le_bytes();
(block[i..i + 4]).copy_from_slice(&word);
}
// Clear CCF flag
cr.modify(|_, w| w.ccfc().set_bit());
Ok(block)
}
/// Reads data from the AES peripheral into the provided buffer using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
pub fn read_all<Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: DerefMut + 'static,
Buffer::Target: AsMutSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.doutr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be written to.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be higher than the priority of the
// transfer created in `write_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::very_high(),
dma::Direction::peripheral_to_memory(),
)
}
}
}
/// Implemented for all chaining modes
///
/// This is mostly an internal trait. The user won't typically need to use or
/// implement this, except to call the various static methods that create a
/// mode.
pub trait Mode {
fn prepare(&self, _: &aes::RegisterBlock);
fn select(&self, _: &mut cr::W);
}
impl dyn Mode {
/// Use this with [`AES::enable`] to encrypt using ECB
pub fn ecb_encrypt() -> ECB<Encrypt> {
ECB(Encrypt)
}
/// Use this with [`AES::enable`] to decrypt using ECB
pub fn ecb_decrypt() -> ECB<Decrypt> {
ECB(Decrypt)
}
/// Use this with [`AES::enable`] to encrypt using CBC
pub fn cbc_encrypt(init_vector: [u32; 4]) -> CBC<Encrypt> {
CBC {
_mode: Encrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to decrypt using CBC
pub fn cbc_decrypt(init_vector: [u32; 4]) -> CBC<Decrypt> {
CBC {
_mode: Decrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to encrypt or decrypt using CTR
pub fn ctr(init_vector: [u32; 3]) -> CTR {
CTR { init_vector }
}
}
/// The ECB (electronic code book) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using ECB
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You gen get an instance of this struct via [`Mode::ecb_encrypt`] or
/// [`Mode::ecb_decrypt`].
pub struct ECB<Mode>(Mode);
impl Mode for ECB<Encrypt> {
fn prepare(&self, _: &aes::RegisterBlock) {
// Nothing to do.
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for ECB<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes)
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CBC (cipher block chaining) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CBC
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You gen get an instance of this struct via [`Mode::cbc_encrypt`] or
/// [`Mode::cbc_decrypt`].
pub struct CBC<Mode> {
_mode: Mode,
init_vector: [u32; 4],
}
impl Mode for CBC<Encrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for CBC<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes);
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CTR (counter) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CTR
/// mode. In CTR mode, encryption and decryption are technically identical, so
/// further qualification is not required.
///
/// You gen get an instance of this struct via [`Mode::ctr`].
pub struct CTR {
init_vector: [u32; 3],
}
impl Mode for CTR {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Initialize initialization vector
//
// See STM32L0x2 reference manual, table 78 on page 408.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(0x0001)); // counter
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select Counter Mode (CTR) mode
w.chmod().bits(0b10);
// These bits mean encryption mode, but in CTR mode,
// encryption and descryption are technically identical, so this
// is fine for either mode.
w.mode().bits(0b00);
}
}
}
fn derive_key(aes: &aes::RegisterBlock) {
// Select key derivation mode. This is safe, as we're writing a valid bit
// pattern.
aes.cr.modify(|_, w| w.mode().bits(0b01));
// Enable the peripheral. It will be automatically disabled again once the
// key has been derived.
aes.cr.modify(|_, w| w.en().set_bit());
// Wait for key derivation to finish
while aes.sr.read().ccf().bit_is_clear() {}
}
/// Used to identify encryption mode
pub struct Encrypt;
/// Used to identify decryption mode
pub struct Decrypt;
/// A 128-bit block
///
/// The AES peripheral processes 128 bits at a time, so this represents one unit
/// of processing.
pub type Block = [u8; 16];
#[derive(Debug)]
pub enum Error {
/// AES peripheral is busy
Busy,
}
/// Wrapper around a [`dma::Transfer`].
///
/// This struct is required, because under the hood, the AES peripheral only
/// supports 32-bit word DMA transfers, while the public API works with byte
/// slices.
pub struct Transfer<Target, Channel, Buffer, State> {
buffer: Pin<Buffer>,
inner: dma::Transfer<Target, Channel, dma::PtrBuffer<u32>, State>,
}
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Ready>
where
Target: dma::Target<Channel>,
Channel: dma::Channel,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
{
/// Create a new instance of `Transfer`
///
/// # Safety
///
/// If this is used to prepare a memory-to-peripheral transfer, the caller
/// must make sure that the buffer can be read from safely.
///
/// If this is used to prepare a peripheral-to-memory transfer, the caller
/// must make sure that the buffer can be written to safely.
///
/// The caller must guarantee that the buffer length is a multiple of 4.
unsafe fn new(
dma: &mut dma::Handle,
target: Target,
channel: Channel,
buffer: Pin<Buffer>,
address: u32,
priority: dma::Priority,
dir: dma::Direction,
) -> Self {
let num_words = buffer.as_slice().len() / 4;
let transfer = dma::Transfer::new(
dma,
target,
channel,
// The caller must guarantee that our length is a multiple of 4, so
// this should be fine.
Pin::new(dma::PtrBuffer {
ptr: buffer.as_slice().as_ptr() as *const u32,
len: num_words,
}),
num_words,
address,
priority,
dir,
false,
);
Self {
buffer,
inner: transfer,
}
}
/// Enables the provided interrupts
///
/// This setting only affects this transfer. It doesn't affect transfer on
/// other channels, or subsequent transfers on the same channel.
pub fn enable_interrupts(&mut self, interrupts: dma::Interrupts) {
self.inner.enable_interrupts(interrupts)
}
/// Start the DMA transfer
///
/// Consumes this instance of `Transfer` and returns a new one, with its
/// state changes to indicate that the transfer has been started.
pub fn start(self) -> Transfer<Target, Channel, Buffer, dma::Started> {
Transfer {
buffer: self.buffer,
inner: self.inner.start(),
}
}
}
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Started>
where
Channel: dma::Channel,
{
/// Indicates whether the transfer is still ongoing
pub fn is_active(&self) -> bool {
self.inner.is_active()
}
/// Waits for the transfer to finish and returns the owned resources
///
/// This function will busily wait until the transfer is finished. If you
/// don't want this, please call this function only once you know that the
/// transfer has finished.
///
/// This function will return immediately, if [`Transfer::is_active`]
/// returns `false`.
pub fn wait(self) -> dma::TransferResourcesResult<Target, Channel, Buffer> {
let (res, err) = match self.inner.wait() {
Ok(res) => (res, None),
Err((res, err)) => (res, Some(err)),
};
let res = dma::TransferResources {
target: res.target,
channel: res.channel,
buffer: self.buffer,
};
match err {
None => Ok(res),
Some(err) => Err((res, err)),
}
}
}
| write_all | identifier_name |
aes.rs | //! Interface to the AES peripheral.
//!
//! Note that the AES peripheral is only available on some MCUs in the L0/L1/L2
//! families. Check the datasheet for more information.
//!
//! See STM32L0x2 reference manual, chapter 18.
use core::{
convert::TryInto,
ops::{Deref, DerefMut},
pin::Pin,
};
use as_slice::{AsMutSlice, AsSlice};
use nb::block;
use void::Void;
use crate::{
dma,
pac::{
self,
aes::{self, cr},
},
rcc::{Enable, Rcc, Reset},
};
/// Entry point to the AES API
pub struct AES {
aes: pac::AES,
}
impl AES {
/// Initialize the AES peripheral
pub fn new(aes: pac::AES, rcc: &mut Rcc) -> Self {
// Enable peripheral clock
pac::AES::enable(rcc);
// Reset peripheral
pac::AES::reset(rcc);
// Configure peripheral
aes.cr.write(|w| {
// Enable DMA
w.dmaouten().set_bit();
w.dmainen().set_bit();
// Disable interrupts
w.errie().clear_bit();
w.ccfie().clear_bit()
});
Self { aes }
}
/// Enable the AES peripheral
///
/// Returns a [`Stream`] instance which can be used to encrypt or decrypt
/// data using the mode selected with the `mode` argument.
///
/// Consumes the `AES` instance. You can get it back later once you're done
/// with the `Stream`, using [`Stream::disable`].
pub fn enable<M>(self, mode: M, key: [u32; 4]) -> Stream
where
M: Mode,
{
// Write key. This is safe, as the register accepts the full range of
// `u32`.
self.aes.keyr0.write(|w| w.bits(key[0]));
self.aes.keyr1.write(|w| w.bits(key[1]));
self.aes.keyr2.write(|w| w.bits(key[2]));
self.aes.keyr3.write(|w| w.bits(key[3]));
mode.prepare(&self.aes);
self.aes.cr.modify(|_, w| {
// Select mode
mode.select(w);
// Configure for stream of bytes
// Safe, as we write a valid byte pattern.
w.datatype().bits(0b10);
// Enable peripheral
w.en().set_bit()
});
Stream {
aes: self, | }
}
/// An active encryption/decryption stream
///
/// You can get an instance of this struct by calling [`AES::enable`].
pub struct Stream {
aes: AES,
/// Can be used to write data to the AES peripheral
pub tx: Tx,
/// Can be used to read data from the AES peripheral
pub rx: Rx,
}
impl Stream {
/// Processes one block of data
///
/// Writes one block of data to the AES peripheral, wait until it is
/// processed then reads the processed block and returns it.
///
/// Whether this is encryption or decryption depends on the mode that was
/// selected when this `Stream` was created.
pub fn process(&mut self, input: &Block) -> Result<Block, Error> {
self.tx.write(input)?;
// Can't panic. Error value of `Rx::read` is `Void`.
let output = block!(self.rx.read()).unwrap();
Ok(output)
}
/// Disable the AES peripheral
///
/// Consumes the stream and returns the disabled [`AES`] instance. Call this
/// method when you're done encrypting/decrypting data. You can then create
/// another `Stream` using [`AES::enable`].
pub fn disable(self) -> AES {
// Disable AES
self.aes.aes.cr.modify(|_, w| w.en().clear_bit());
self.aes
}
}
/// Can be used to write data to the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Tx(());
impl Tx {
/// Write a block to the AES peripheral
///
/// Please note that only one block can be written before you need to read
/// the processed block back using [`Read::read`]. Calling this method
/// multiple times without calling [`Read::read`] in between will result in
/// an error to be returned.
pub fn write(&mut self, block: &Block) -> Result<(), Error> {
// Get access to the registers. This is safe, because:
// - `Tx` has exclusive access to DINR.
// - We only use SR for an atomic read.
let (dinr, sr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.dinr, &aes.sr)
};
// Write input data to DINR
//
// See STM32L0x2 reference manual, section 18.4.10.
for i in (0..4).rev() {
dinr.write(|w| {
let i = i * 4;
let word = &block[i..i + 4];
// Can't panic, because `word` is 4 bytes long.
let word = word.try_into().unwrap();
let word = u32::from_le_bytes(word);
w.bits(word)
});
}
// Was there an unexpected write? If so, a computation is already
// ongoing and the user needs to call `Rx::read` next. If I understand
// the documentation correctly, our writes to the register above
// shouldn't have affected the ongoing computation.
if sr.read().wrerr().bit_is_set() {
return Err(Error::Busy);
}
Ok(())
}
/// Writes the provided buffer to the AES peripheral using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
pub fn write_all<Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.dinr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be read from.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be lower than the priority of the
// transfer created in `read_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::high(),
dma::Direction::memory_to_peripheral(),
)
}
}
}
/// Can be used to read data from the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Rx(());
impl Rx {
pub fn read(&mut self) -> nb::Result<Block, Void> {
// Get access to the registers. This is safe, because:
// - We only use SR for an atomic read.
// - `Rx` has exclusive access to DOUTR.
// - While it exists, `Rx` has exlusive access to CR.
let (sr, doutr, cr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.sr, &aes.doutr, &aes.cr)
};
// Is a computation complete?
if sr.read().ccf().bit_is_clear() {
return Err(nb::Error::WouldBlock);
}
// Read output data from DOUTR
//
// See STM32L0x2 reference manual, section 18.4.10.
let mut block = [0; 16];
for i in (0..4).rev() {
let i = i * 4;
let word = doutr.read().bits();
let word = word.to_le_bytes();
(block[i..i + 4]).copy_from_slice(&word);
}
// Clear CCF flag
cr.modify(|_, w| w.ccfc().set_bit());
Ok(block)
}
/// Reads data from the AES peripheral into the provided buffer using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
pub fn read_all<Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: DerefMut + 'static,
Buffer::Target: AsMutSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.doutr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be written to.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be higher than the priority of the
// transfer created in `write_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::very_high(),
dma::Direction::peripheral_to_memory(),
)
}
}
}
/// Implemented for all chaining modes
///
/// This is mostly an internal trait. The user won't typically need to use or
/// implement this, except to call the various static methods that create a
/// mode.
pub trait Mode {
fn prepare(&self, _: &aes::RegisterBlock);
fn select(&self, _: &mut cr::W);
}
impl dyn Mode {
/// Use this with [`AES::enable`] to encrypt using ECB
pub fn ecb_encrypt() -> ECB<Encrypt> {
ECB(Encrypt)
}
/// Use this with [`AES::enable`] to decrypt using ECB
pub fn ecb_decrypt() -> ECB<Decrypt> {
ECB(Decrypt)
}
/// Use this with [`AES::enable`] to encrypt using CBC
pub fn cbc_encrypt(init_vector: [u32; 4]) -> CBC<Encrypt> {
CBC {
_mode: Encrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to decrypt using CBC
pub fn cbc_decrypt(init_vector: [u32; 4]) -> CBC<Decrypt> {
CBC {
_mode: Decrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to encrypt or decrypt using CTR
pub fn ctr(init_vector: [u32; 3]) -> CTR {
CTR { init_vector }
}
}
/// The ECB (electronic code book) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using ECB
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You gen get an instance of this struct via [`Mode::ecb_encrypt`] or
/// [`Mode::ecb_decrypt`].
pub struct ECB<Mode>(Mode);
impl Mode for ECB<Encrypt> {
fn prepare(&self, _: &aes::RegisterBlock) {
// Nothing to do.
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for ECB<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes)
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CBC (cipher block chaining) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CBC
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You gen get an instance of this struct via [`Mode::cbc_encrypt`] or
/// [`Mode::cbc_decrypt`].
pub struct CBC<Mode> {
_mode: Mode,
init_vector: [u32; 4],
}
impl Mode for CBC<Encrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for CBC<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes);
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CTR (counter) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CTR
/// mode. In CTR mode, encryption and decryption are technically identical, so
/// further qualification is not required.
///
/// You gen get an instance of this struct via [`Mode::ctr`].
pub struct CTR {
init_vector: [u32; 3],
}
impl Mode for CTR {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Initialize initialization vector
//
// See STM32L0x2 reference manual, table 78 on page 408.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(0x0001)); // counter
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select Counter Mode (CTR) mode
w.chmod().bits(0b10);
// These bits mean encryption mode, but in CTR mode,
// encryption and descryption are technically identical, so this
// is fine for either mode.
w.mode().bits(0b00);
}
}
}
fn derive_key(aes: &aes::RegisterBlock) {
// Select key derivation mode. This is safe, as we're writing a valid bit
// pattern.
aes.cr.modify(|_, w| w.mode().bits(0b01));
// Enable the peripheral. It will be automatically disabled again once the
// key has been derived.
aes.cr.modify(|_, w| w.en().set_bit());
// Wait for key derivation to finish
while aes.sr.read().ccf().bit_is_clear() {}
}
/// Used to identify encryption mode
pub struct Encrypt;
/// Used to identify decryption mode
pub struct Decrypt;
/// A 128-bit block
///
/// The AES peripheral processes 128 bits at a time, so this represents one unit
/// of processing.
pub type Block = [u8; 16];
#[derive(Debug)]
pub enum Error {
/// AES peripheral is busy
Busy,
}
/// Wrapper around a [`dma::Transfer`].
///
/// This struct is required, because under the hood, the AES peripheral only
/// supports 32-bit word DMA transfers, while the public API works with byte
/// slices.
pub struct Transfer<Target, Channel, Buffer, State> {
buffer: Pin<Buffer>,
inner: dma::Transfer<Target, Channel, dma::PtrBuffer<u32>, State>,
}
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Ready>
where
Target: dma::Target<Channel>,
Channel: dma::Channel,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
{
/// Create a new instance of `Transfer`
///
/// # Safety
///
/// If this is used to prepare a memory-to-peripheral transfer, the caller
/// must make sure that the buffer can be read from safely.
///
/// If this is used to prepare a peripheral-to-memory transfer, the caller
/// must make sure that the buffer can be written to safely.
///
/// The caller must guarantee that the buffer length is a multiple of 4.
unsafe fn new(
dma: &mut dma::Handle,
target: Target,
channel: Channel,
buffer: Pin<Buffer>,
address: u32,
priority: dma::Priority,
dir: dma::Direction,
) -> Self {
let num_words = buffer.as_slice().len() / 4;
let transfer = dma::Transfer::new(
dma,
target,
channel,
// The caller must guarantee that our length is a multiple of 4, so
// this should be fine.
Pin::new(dma::PtrBuffer {
ptr: buffer.as_slice().as_ptr() as *const u32,
len: num_words,
}),
num_words,
address,
priority,
dir,
false,
);
Self {
buffer,
inner: transfer,
}
}
/// Enables the provided interrupts
///
/// This setting only affects this transfer. It doesn't affect transfer on
/// other channels, or subsequent transfers on the same channel.
pub fn enable_interrupts(&mut self, interrupts: dma::Interrupts) {
self.inner.enable_interrupts(interrupts)
}
/// Start the DMA transfer
///
/// Consumes this instance of `Transfer` and returns a new one, with its
/// state changes to indicate that the transfer has been started.
pub fn start(self) -> Transfer<Target, Channel, Buffer, dma::Started> {
Transfer {
buffer: self.buffer,
inner: self.inner.start(),
}
}
}
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Started>
where
Channel: dma::Channel,
{
/// Indicates whether the transfer is still ongoing
pub fn is_active(&self) -> bool {
self.inner.is_active()
}
/// Waits for the transfer to finish and returns the owned resources
///
/// This function will busily wait until the transfer is finished. If you
/// don't want this, please call this function only once you know that the
/// transfer has finished.
///
/// This function will return immediately, if [`Transfer::is_active`]
/// returns `false`.
pub fn wait(self) -> dma::TransferResourcesResult<Target, Channel, Buffer> {
let (res, err) = match self.inner.wait() {
Ok(res) => (res, None),
Err((res, err)) => (res, Some(err)),
};
let res = dma::TransferResources {
target: res.target,
channel: res.channel,
buffer: self.buffer,
};
match err {
None => Ok(res),
Some(err) => Err((res, err)),
}
}
} | rx: Rx(()),
tx: Tx(()),
} | random_line_split |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
}
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while(self.stream.is_none()) {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up ...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn | (&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
}
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the node chose to connect to us, then we got a connection on our listening
* address and need to give the stream to us here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone()));
stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 {
break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
}
| check_mgmt_msg | identifier_name |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> |
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while(self.stream.is_none()) {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up ...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn check_mgmt_msg(&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
}
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the node chose to connect to us, then we got a connection on our listening
* address and need to give the stream to us here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone()));
stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 {
break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
}
| {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
} | identifier_body |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
}
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while(self.stream.is_none()) {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up ...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn check_mgmt_msg(&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
}
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the node chose to connect to us, then we got a connection on our listening
* address and need to give the stream to us here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone())); | break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
} | stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 { | random_line_split |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
}
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while(self.stream.is_none()) {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up ...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn check_mgmt_msg(&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => |
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the node chose to connect to us, then we got a connection on our listening
* address and need to give the stream to us here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone()));
stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 {
break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
}
| {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
} | conditional_block |
import_videos.py | # coding=utf8
# encoding: utf-8
import os
import platform
import re
import signal
import sys
import traceback
from subprocess import Popen, PIPE
from threading import Thread, current_thread
from Queue import Queue
from util.log import get_logger, log
from video.models import Video, KeywordVideoId
from django.db.models import Max
from collect_video import G_GEN_IMAGE
MAX_THREAD_NUM = 4
THREAD_STOP_FLAGS = []
THUMB_DIR = './static/thumb'
THUMB_SIZE = '180x135'
COVER_DIR = './static/cover'
FLIP_DIR = './static/flip'
FLIP_NUM = 10
task_queue = Queue(maxsize=2000)
def register_int_signal_handler():
def stop_thread_handler(signum, frame):
log.info("Received signal {0}. Will stop all task threads".format(signum))
for _ in range(len(THREAD_STOP_FLAGS)):
THREAD_STOP_FLAGS[_] = True
if platform.platform().startswith('Windows'):
signal.signal(signal.CTRL_C_EVENT, stop_thread_handler)
else:
signal.signal(signal.SIGINT, stop_thread_handler)
def next_video_id(current, path):
existing = Video.objects.filter(path=path)
if existing:
return existing[0].video_id, current
current += 1
return current, current
def create_task_list(path_list):
"""
Walks path recursively, and create a task list
:param path_list: a list of (path, rating)
:return: a list of ImportTask objects
"""
current_video_id = Video.objects.all().aggregate(Max('video_id'))['video_id__max']
if not current_video_id:
current_video_id = 0
task_list = []
for (path, rating) in path_list:
base_path = os.path.split(path)[0]
if os.path.isfile(path):
file_name = os.path.basename(path)
if is_valid_video_file(path, file_name):
video_id, current_video_id = next_video_id(current_video_id, path)
task_list.append(ImportTask(video_id, base_path, path, rating))
continue
for (root, dirs, files) in os.walk(path):
for file_name in files:
try:
file_path = os.path.join(root, file_name)
if os.path.isdir(file_path):
continue
if is_valid_video_file(file_path, file_name):
video_id, current_video_id = next_video_id(current_video_id, file_path)
task_list.append(ImportTask(video_id, base_path, file_path, rating))
except:
log.error('#Error while proceeding: {0}'.format(file_name))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
return task_list
def start_tasks(task_list):
global task_queue
for task in task_list:
task_queue.put(task)
if not THREAD_STOP_FLAGS:
for _ in range(MAX_THREAD_NUM):
THREAD_STOP_FLAGS.append(True)
if not os.path.isdir(COVER_DIR):
os.mkdir(COVER_DIR)
if not os.path.isdir(THUMB_DIR):
os.mkdir(THUMB_DIR)
if not os.path.isdir(FLIP_DIR):
os.mkdir(FLIP_DIR)
for _ in range(MAX_THREAD_NUM):
if THREAD_STOP_FLAGS[_]:
t = Thread(target=import_worker, kwargs={'thread_index': _})
t.name = str(_)
t.daemon = False
t.start()
task_queue.join()
def add_keywords_to_db(task_list):
blacklist = load_keyword_blacklist_from_file()
for task in task_list:
base_path = task.base_path
file_path = task.file_path
video_id = task.video_id
keywords = get_keywords(base_path, file_path, blacklist)
log.info('#Keywords:'.format(keywords))
for key in keywords:
try:
if KeywordVideoId.objects.filter(keyword=key, video_id=video_id):
log.info("Existing keyword {0} for {1}".format(key, video_id))
continue
keyword_record = KeywordVideoId()
keyword_record.keyword = key
keyword_record.video = Video.objects.get(video_id=video_id)
keyword_record.save()
log.info('#Added keyword:{0} for video_id: {1}'.format(key, video_id))
except Exception as e:
log.error("Error while adding keyword {0} to video {1}: {2}".format(key, video_id, e))
class ImportTask(object):
def __init__(self, video_id, base_path, path, rating=Video.P): | :param rating: rating of the video, highest by default.
"""
self.video_id = video_id
self.base_path = base_path
self.file_path = path
self.rating = rating
def import_worker(thread_index):
"""
Thread worker that deals with tasks.
:return:
"""
THREAD_STOP_FLAGS[thread_index] = False
while not (THREAD_STOP_FLAGS[thread_index] or task_queue.empty()):
task = task_queue.get()
do_import_video_task(task)
task_queue.task_done()
THREAD_STOP_FLAGS[thread_index] = True
def do_import_video_task(task):
video_id = task.video_id
file_path = task.file_path
rating = task.rating
file_name = os.path.basename(file_path)[:-4]
tlog = get_logger(current_thread().name)
videos = Video.objects.filter(path=file_path)
if videos:
tlog.info("Existing video: {0}".format(task.file_path))
return
video = Video()
video.video_id = video_id
video.rating = rating
thumb_path = get_thumb_path(video.video_id)
cover_path = get_cover_path(video.video_id)
if not gen_cover(task.file_path, cover_path):
tlog.error("Failed to gen cover for {0}".format(file_path))
return
success, duration = gen_thumb(file_path, thumb_path)
if success:
if not gen_flips(file_path, video.video_id, duration, FLIP_DIR, FLIP_NUM):
tlog.error("Failed to gen flips for {0}".format(file_path))
else:
tlog.error("Failed to gen thumb for {0}".format(file_path))
video.title = file_name
video.path = file_path
video.duration = duration
video.save()
tlog.info('#Video: {0} [{1}] {2}'.format(video.title, video.duration, video.path))
def is_valid_video_file(file_path, file_name):
# skip hidden files (possibly not valid video files)
if file_name.startswith('.') or (not file_name.endswith('.mp4')):
return False
if os.path.getsize(file_path) == 0:
log.info('Remove invalid video file: {0}'.format(file_path))
os.remove(file_path)
return False
return True
def load_keyword_blacklist_from_file():
blacklist = set()
keyword_file = 'keywords.blacklist'
try:
with open(keyword_file, 'r') as kfp:
for line in kfp:
line = line.strip('\n')
if line:
blacklist.add(line)
log.info("Keywords blacklist: {0}".format(blacklist))
except Exception as e:
log.error("Error while processing {0}:{1}".format(keyword_file, e))
return blacklist
def get_keywords(prefix, file_path, blacklist):
"""
Get keywords from file path
:param prefix: Prefix of the dir path, so we can ignore them
:param file_path: full path of the video file
:param blacklist: A set of words/symbols that should be ignored
:return: a list of keywords
"""
file_path = str(file_path).replace(prefix, '') # remove base_dir from file_path
file_path = os.path.splitext(file_path)[0] # Only keep the part without extension
file_path = str(file_path).lower()
for bad_keyword in blacklist:
file_path = file_path.replace(bad_keyword, ' ')
file_path = re.sub(r'\s+', ' ', file_path) # Replace multiple spaces to single one
keywords = file_path.split(' ')
keywords = [k for k in keywords if k]
return keywords
class KeywordDictDataObj(object):
def __init__(self):
self.count = 0
self.files = set()
def get_thumb_path(fn):
return './static/thumb/' + str(fn) + '.png'
def get_cover_path(fn):
return './static/cover/' + str(fn) + '.png'
def gen_thumb(video_path, thumb_path):
"""
Generate thumb image for the given video, and grabs duration from output
:return: (success, duration)
"""
if os.path.isfile(thumb_path):
os.remove(thumb_path)
global THUMB_SIZE
cmd = ['ffmpeg', '-itsoffset', '-5', '-i', video_path, '-vframes', '1', '-f', 'apng', '-s', THUMB_SIZE, thumb_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = p.communicate()[1]
duration = search_duration_from_text(output)
if not duration:
tlog = get_logger(current_thread().name)
tlog.error("Failed to find duration for {0}".format(video_path))
duration = 0
return p.returncode == 0, duration
def gen_flips(video_path, video_id, duration, flip_path, flip_num):
"""
Generate flips for the given video
:param video_path: path of the video
:param video_id: id of the file
:param duration: duration of video in seconds
:param flip_path: path dir to put the flips
:param flip_num: number of flips to generate
:return: True on success, False otherwise
"""
if not G_GEN_IMAGE:
return True
duration = float(duration)
flip_num = float(flip_num)
interval = duration / flip_num
if interval <= 0.0:
tlog = get_logger(current_thread().name)
tlog.error("Cannot generate flips. Duration: {0} FlipNum:{1}".format(duration, flip_num))
return False
fps = 'fps=1/' + str(interval)
global THUMB_SIZE
flip_path = os.path.join(flip_path, str(video_id))
for _ in range(FLIP_NUM+3):
flip_file = "{0}-{1}.png".format(flip_path, _)
if os.path.isfile(flip_file):
os.remove(flip_file)
flip_path_template = flip_path + '-%d.png'
cmd = ['ffmpeg', '-i', video_path, '-vf', fps, '-s', THUMB_SIZE, flip_path_template]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
def gen_cover(video_path, cover_path):
if not G_GEN_IMAGE:
return True
if os.path.isfile(cover_path):
os.remove(cover_path)
cmd = ['ffmpeg', '-itsoffset', '-1', '-i', video_path, '-vframes', '1', '-f', 'apng', cover_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Convert video to mp4
def convert_video_to_mp4(video_path, dest_path):
tlog = get_logger(current_thread().name)
if os.path.isfile(dest_path):
tlog.info('#Already converted, skip: {0}'.format(dest_path))
return True
tlog.info('#Converting: {0} => {1}\n', video_path, dest_path)
cmd = ['ffmpeg', '-i', video_path, '-vcodec', 'h264', '-acodec', 'aac', dest_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Search the duration from given text
def search_duration_from_text(text):
# Match pattern like Duration: 00:24:14.91, s
regExp = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})')
result = regExp.search(text, re.M | re.U)
if result is not None:
(hour, min, sec) = result.groups()
duration = int(hour) * 3600 + int(min) * 60 + int(sec)
return duration
return None | """
Create an import task object.
:param video_id: a pre-allocated video_id in number, so we don't need to lock db in multiple thread.
:param base_path: path prefix that will be ignored when creating keywords from path.
:param path: path of the file | random_line_split |
import_videos.py | # coding=utf8
# encoding: utf-8
import os
import platform
import re
import signal
import sys
import traceback
from subprocess import Popen, PIPE
from threading import Thread, current_thread
from Queue import Queue
from util.log import get_logger, log
from video.models import Video, KeywordVideoId
from django.db.models import Max
from collect_video import G_GEN_IMAGE
MAX_THREAD_NUM = 4
THREAD_STOP_FLAGS = []
THUMB_DIR = './static/thumb'
THUMB_SIZE = '180x135'
COVER_DIR = './static/cover'
FLIP_DIR = './static/flip'
FLIP_NUM = 10
task_queue = Queue(maxsize=2000)
def register_int_signal_handler():
def stop_thread_handler(signum, frame):
log.info("Received signal {0}. Will stop all task threads".format(signum))
for _ in range(len(THREAD_STOP_FLAGS)):
THREAD_STOP_FLAGS[_] = True
if platform.platform().startswith('Windows'):
signal.signal(signal.CTRL_C_EVENT, stop_thread_handler)
else:
signal.signal(signal.SIGINT, stop_thread_handler)
def next_video_id(current, path):
existing = Video.objects.filter(path=path)
if existing:
return existing[0].video_id, current
current += 1
return current, current
def create_task_list(path_list):
"""
Walks path recursively, and create a task list
:param path_list: a list of (path, rating)
:return: a list of ImportTask objects
"""
current_video_id = Video.objects.all().aggregate(Max('video_id'))['video_id__max']
if not current_video_id:
current_video_id = 0
task_list = []
for (path, rating) in path_list:
base_path = os.path.split(path)[0]
if os.path.isfile(path):
file_name = os.path.basename(path)
if is_valid_video_file(path, file_name):
video_id, current_video_id = next_video_id(current_video_id, path)
task_list.append(ImportTask(video_id, base_path, path, rating))
continue
for (root, dirs, files) in os.walk(path):
for file_name in files:
try:
file_path = os.path.join(root, file_name)
if os.path.isdir(file_path):
continue
if is_valid_video_file(file_path, file_name):
video_id, current_video_id = next_video_id(current_video_id, file_path)
task_list.append(ImportTask(video_id, base_path, file_path, rating))
except:
log.error('#Error while proceeding: {0}'.format(file_name))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
return task_list
def start_tasks(task_list):
global task_queue
for task in task_list:
task_queue.put(task)
if not THREAD_STOP_FLAGS:
for _ in range(MAX_THREAD_NUM):
THREAD_STOP_FLAGS.append(True)
if not os.path.isdir(COVER_DIR):
os.mkdir(COVER_DIR)
if not os.path.isdir(THUMB_DIR):
os.mkdir(THUMB_DIR)
if not os.path.isdir(FLIP_DIR):
os.mkdir(FLIP_DIR)
for _ in range(MAX_THREAD_NUM):
if THREAD_STOP_FLAGS[_]:
t = Thread(target=import_worker, kwargs={'thread_index': _})
t.name = str(_)
t.daemon = False
t.start()
task_queue.join()
def add_keywords_to_db(task_list):
blacklist = load_keyword_blacklist_from_file()
for task in task_list:
base_path = task.base_path
file_path = task.file_path
video_id = task.video_id
keywords = get_keywords(base_path, file_path, blacklist)
log.info('#Keywords:'.format(keywords))
for key in keywords:
try:
if KeywordVideoId.objects.filter(keyword=key, video_id=video_id):
log.info("Existing keyword {0} for {1}".format(key, video_id))
continue
keyword_record = KeywordVideoId()
keyword_record.keyword = key
keyword_record.video = Video.objects.get(video_id=video_id)
keyword_record.save()
log.info('#Added keyword:{0} for video_id: {1}'.format(key, video_id))
except Exception as e:
log.error("Error while adding keyword {0} to video {1}: {2}".format(key, video_id, e))
class ImportTask(object):
def __init__(self, video_id, base_path, path, rating=Video.P):
"""
Create an import task object.
:param video_id: a pre-allocated video_id in number, so we don't need to lock db in multiple thread.
:param base_path: path prefix that will be ignored when creating keywords from path.
:param path: path of the file
:param rating: rating of the video, highest by default.
"""
self.video_id = video_id
self.base_path = base_path
self.file_path = path
self.rating = rating
def import_worker(thread_index):
"""
Thread worker that deals with tasks.
:return:
"""
THREAD_STOP_FLAGS[thread_index] = False
while not (THREAD_STOP_FLAGS[thread_index] or task_queue.empty()):
task = task_queue.get()
do_import_video_task(task)
task_queue.task_done()
THREAD_STOP_FLAGS[thread_index] = True
def do_import_video_task(task):
video_id = task.video_id
file_path = task.file_path
rating = task.rating
file_name = os.path.basename(file_path)[:-4]
tlog = get_logger(current_thread().name)
videos = Video.objects.filter(path=file_path)
if videos:
tlog.info("Existing video: {0}".format(task.file_path))
return
video = Video()
video.video_id = video_id
video.rating = rating
thumb_path = get_thumb_path(video.video_id)
cover_path = get_cover_path(video.video_id)
if not gen_cover(task.file_path, cover_path):
tlog.error("Failed to gen cover for {0}".format(file_path))
return
success, duration = gen_thumb(file_path, thumb_path)
if success:
if not gen_flips(file_path, video.video_id, duration, FLIP_DIR, FLIP_NUM):
tlog.error("Failed to gen flips for {0}".format(file_path))
else:
tlog.error("Failed to gen thumb for {0}".format(file_path))
video.title = file_name
video.path = file_path
video.duration = duration
video.save()
tlog.info('#Video: {0} [{1}] {2}'.format(video.title, video.duration, video.path))
def is_valid_video_file(file_path, file_name):
# skip hidden files (possibly not valid video files)
if file_name.startswith('.') or (not file_name.endswith('.mp4')):
return False
if os.path.getsize(file_path) == 0:
log.info('Remove invalid video file: {0}'.format(file_path))
os.remove(file_path)
return False
return True
def load_keyword_blacklist_from_file():
blacklist = set()
keyword_file = 'keywords.blacklist'
try:
with open(keyword_file, 'r') as kfp:
for line in kfp:
line = line.strip('\n')
if line:
blacklist.add(line)
log.info("Keywords blacklist: {0}".format(blacklist))
except Exception as e:
log.error("Error while processing {0}:{1}".format(keyword_file, e))
return blacklist
def get_keywords(prefix, file_path, blacklist):
"""
Get keywords from file path
:param prefix: Prefix of the dir path, so we can ignore them
:param file_path: full path of the video file
:param blacklist: A set of words/symbols that should be ignored
:return: a list of keywords
"""
file_path = str(file_path).replace(prefix, '') # remove base_dir from file_path
file_path = os.path.splitext(file_path)[0] # Only keep the part without extension
file_path = str(file_path).lower()
for bad_keyword in blacklist:
file_path = file_path.replace(bad_keyword, ' ')
file_path = re.sub(r'\s+', ' ', file_path) # Replace multiple spaces to single one
keywords = file_path.split(' ')
keywords = [k for k in keywords if k]
return keywords
class KeywordDictDataObj(object):
def __init__(self):
self.count = 0
self.files = set()
def get_thumb_path(fn):
return './static/thumb/' + str(fn) + '.png'
def get_cover_path(fn):
return './static/cover/' + str(fn) + '.png'
def gen_thumb(video_path, thumb_path):
"""
Generate thumb image for the given video, and grabs duration from output
:return: (success, duration)
"""
if os.path.isfile(thumb_path):
os.remove(thumb_path)
global THUMB_SIZE
cmd = ['ffmpeg', '-itsoffset', '-5', '-i', video_path, '-vframes', '1', '-f', 'apng', '-s', THUMB_SIZE, thumb_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = p.communicate()[1]
duration = search_duration_from_text(output)
if not duration:
tlog = get_logger(current_thread().name)
tlog.error("Failed to find duration for {0}".format(video_path))
duration = 0
return p.returncode == 0, duration
def gen_flips(video_path, video_id, duration, flip_path, flip_num):
"""
Generate flips for the given video
:param video_path: path of the video
:param video_id: id of the file
:param duration: duration of video in seconds
:param flip_path: path dir to put the flips
:param flip_num: number of flips to generate
:return: True on success, False otherwise
"""
if not G_GEN_IMAGE:
|
duration = float(duration)
flip_num = float(flip_num)
interval = duration / flip_num
if interval <= 0.0:
tlog = get_logger(current_thread().name)
tlog.error("Cannot generate flips. Duration: {0} FlipNum:{1}".format(duration, flip_num))
return False
fps = 'fps=1/' + str(interval)
global THUMB_SIZE
flip_path = os.path.join(flip_path, str(video_id))
for _ in range(FLIP_NUM+3):
flip_file = "{0}-{1}.png".format(flip_path, _)
if os.path.isfile(flip_file):
os.remove(flip_file)
flip_path_template = flip_path + '-%d.png'
cmd = ['ffmpeg', '-i', video_path, '-vf', fps, '-s', THUMB_SIZE, flip_path_template]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
def gen_cover(video_path, cover_path):
if not G_GEN_IMAGE:
return True
if os.path.isfile(cover_path):
os.remove(cover_path)
cmd = ['ffmpeg', '-itsoffset', '-1', '-i', video_path, '-vframes', '1', '-f', 'apng', cover_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Convert video to mp4
def convert_video_to_mp4(video_path, dest_path):
tlog = get_logger(current_thread().name)
if os.path.isfile(dest_path):
tlog.info('#Already converted, skip: {0}'.format(dest_path))
return True
tlog.info('#Converting: {0} => {1}\n', video_path, dest_path)
cmd = ['ffmpeg', '-i', video_path, '-vcodec', 'h264', '-acodec', 'aac', dest_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Search the duration from given text
def search_duration_from_text(text):
# Match pattern like Duration: 00:24:14.91, s
regExp = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})')
result = regExp.search(text, re.M | re.U)
if result is not None:
(hour, min, sec) = result.groups()
duration = int(hour) * 3600 + int(min) * 60 + int(sec)
return duration
return None
| return True | conditional_block |
import_videos.py | # coding=utf8
# encoding: utf-8
import os
import platform
import re
import signal
import sys
import traceback
from subprocess import Popen, PIPE
from threading import Thread, current_thread
from Queue import Queue
from util.log import get_logger, log
from video.models import Video, KeywordVideoId
from django.db.models import Max
from collect_video import G_GEN_IMAGE
MAX_THREAD_NUM = 4
THREAD_STOP_FLAGS = []
THUMB_DIR = './static/thumb'
THUMB_SIZE = '180x135'
COVER_DIR = './static/cover'
FLIP_DIR = './static/flip'
FLIP_NUM = 10
task_queue = Queue(maxsize=2000)
def register_int_signal_handler():
def stop_thread_handler(signum, frame):
log.info("Received signal {0}. Will stop all task threads".format(signum))
for _ in range(len(THREAD_STOP_FLAGS)):
THREAD_STOP_FLAGS[_] = True
if platform.platform().startswith('Windows'):
signal.signal(signal.CTRL_C_EVENT, stop_thread_handler)
else:
signal.signal(signal.SIGINT, stop_thread_handler)
def next_video_id(current, path):
existing = Video.objects.filter(path=path)
if existing:
return existing[0].video_id, current
current += 1
return current, current
def create_task_list(path_list):
"""
Walks path recursively, and create a task list
:param path_list: a list of (path, rating)
:return: a list of ImportTask objects
"""
current_video_id = Video.objects.all().aggregate(Max('video_id'))['video_id__max']
if not current_video_id:
current_video_id = 0
task_list = []
for (path, rating) in path_list:
base_path = os.path.split(path)[0]
if os.path.isfile(path):
file_name = os.path.basename(path)
if is_valid_video_file(path, file_name):
video_id, current_video_id = next_video_id(current_video_id, path)
task_list.append(ImportTask(video_id, base_path, path, rating))
continue
for (root, dirs, files) in os.walk(path):
for file_name in files:
try:
file_path = os.path.join(root, file_name)
if os.path.isdir(file_path):
continue
if is_valid_video_file(file_path, file_name):
video_id, current_video_id = next_video_id(current_video_id, file_path)
task_list.append(ImportTask(video_id, base_path, file_path, rating))
except:
log.error('#Error while proceeding: {0}'.format(file_name))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
return task_list
def start_tasks(task_list):
global task_queue
for task in task_list:
task_queue.put(task)
if not THREAD_STOP_FLAGS:
for _ in range(MAX_THREAD_NUM):
THREAD_STOP_FLAGS.append(True)
if not os.path.isdir(COVER_DIR):
os.mkdir(COVER_DIR)
if not os.path.isdir(THUMB_DIR):
os.mkdir(THUMB_DIR)
if not os.path.isdir(FLIP_DIR):
os.mkdir(FLIP_DIR)
for _ in range(MAX_THREAD_NUM):
if THREAD_STOP_FLAGS[_]:
t = Thread(target=import_worker, kwargs={'thread_index': _})
t.name = str(_)
t.daemon = False
t.start()
task_queue.join()
def add_keywords_to_db(task_list):
blacklist = load_keyword_blacklist_from_file()
for task in task_list:
base_path = task.base_path
file_path = task.file_path
video_id = task.video_id
keywords = get_keywords(base_path, file_path, blacklist)
log.info('#Keywords:'.format(keywords))
for key in keywords:
try:
if KeywordVideoId.objects.filter(keyword=key, video_id=video_id):
log.info("Existing keyword {0} for {1}".format(key, video_id))
continue
keyword_record = KeywordVideoId()
keyword_record.keyword = key
keyword_record.video = Video.objects.get(video_id=video_id)
keyword_record.save()
log.info('#Added keyword:{0} for video_id: {1}'.format(key, video_id))
except Exception as e:
log.error("Error while adding keyword {0} to video {1}: {2}".format(key, video_id, e))
class ImportTask(object):
def __init__(self, video_id, base_path, path, rating=Video.P):
"""
Create an import task object.
:param video_id: a pre-allocated video_id in number, so we don't need to lock db in multiple thread.
:param base_path: path prefix that will be ignored when creating keywords from path.
:param path: path of the file
:param rating: rating of the video, highest by default.
"""
self.video_id = video_id
self.base_path = base_path
self.file_path = path
self.rating = rating
def import_worker(thread_index):
"""
Thread worker that deals with tasks.
:return:
"""
THREAD_STOP_FLAGS[thread_index] = False
while not (THREAD_STOP_FLAGS[thread_index] or task_queue.empty()):
task = task_queue.get()
do_import_video_task(task)
task_queue.task_done()
THREAD_STOP_FLAGS[thread_index] = True
def do_import_video_task(task):
video_id = task.video_id
file_path = task.file_path
rating = task.rating
file_name = os.path.basename(file_path)[:-4]
tlog = get_logger(current_thread().name)
videos = Video.objects.filter(path=file_path)
if videos:
tlog.info("Existing video: {0}".format(task.file_path))
return
video = Video()
video.video_id = video_id
video.rating = rating
thumb_path = get_thumb_path(video.video_id)
cover_path = get_cover_path(video.video_id)
if not gen_cover(task.file_path, cover_path):
tlog.error("Failed to gen cover for {0}".format(file_path))
return
success, duration = gen_thumb(file_path, thumb_path)
if success:
if not gen_flips(file_path, video.video_id, duration, FLIP_DIR, FLIP_NUM):
tlog.error("Failed to gen flips for {0}".format(file_path))
else:
tlog.error("Failed to gen thumb for {0}".format(file_path))
video.title = file_name
video.path = file_path
video.duration = duration
video.save()
tlog.info('#Video: {0} [{1}] {2}'.format(video.title, video.duration, video.path))
def is_valid_video_file(file_path, file_name):
# skip hidden files (possibly not valid video files)
if file_name.startswith('.') or (not file_name.endswith('.mp4')):
return False
if os.path.getsize(file_path) == 0:
log.info('Remove invalid video file: {0}'.format(file_path))
os.remove(file_path)
return False
return True
def load_keyword_blacklist_from_file():
blacklist = set()
keyword_file = 'keywords.blacklist'
try:
with open(keyword_file, 'r') as kfp:
for line in kfp:
line = line.strip('\n')
if line:
blacklist.add(line)
log.info("Keywords blacklist: {0}".format(blacklist))
except Exception as e:
log.error("Error while processing {0}:{1}".format(keyword_file, e))
return blacklist
def get_keywords(prefix, file_path, blacklist):
"""
Get keywords from file path
:param prefix: Prefix of the dir path, so we can ignore them
:param file_path: full path of the video file
:param blacklist: A set of words/symbols that should be ignored
:return: a list of keywords
"""
file_path = str(file_path).replace(prefix, '') # remove base_dir from file_path
file_path = os.path.splitext(file_path)[0] # Only keep the part without extension
file_path = str(file_path).lower()
for bad_keyword in blacklist:
file_path = file_path.replace(bad_keyword, ' ')
file_path = re.sub(r'\s+', ' ', file_path) # Replace multiple spaces to single one
keywords = file_path.split(' ')
keywords = [k for k in keywords if k]
return keywords
class KeywordDictDataObj(object):
def __init__(self):
self.count = 0
self.files = set()
def get_thumb_path(fn):
return './static/thumb/' + str(fn) + '.png'
def get_cover_path(fn):
return './static/cover/' + str(fn) + '.png'
def gen_thumb(video_path, thumb_path):
"""
Generate thumb image for the given video, and grabs duration from output
:return: (success, duration)
"""
if os.path.isfile(thumb_path):
os.remove(thumb_path)
global THUMB_SIZE
cmd = ['ffmpeg', '-itsoffset', '-5', '-i', video_path, '-vframes', '1', '-f', 'apng', '-s', THUMB_SIZE, thumb_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = p.communicate()[1]
duration = search_duration_from_text(output)
if not duration:
tlog = get_logger(current_thread().name)
tlog.error("Failed to find duration for {0}".format(video_path))
duration = 0
return p.returncode == 0, duration
def gen_flips(video_path, video_id, duration, flip_path, flip_num):
"""
Generate flips for the given video
:param video_path: path of the video
:param video_id: id of the file
:param duration: duration of video in seconds
:param flip_path: path dir to put the flips
:param flip_num: number of flips to generate
:return: True on success, False otherwise
"""
if not G_GEN_IMAGE:
return True
duration = float(duration)
flip_num = float(flip_num)
interval = duration / flip_num
if interval <= 0.0:
tlog = get_logger(current_thread().name)
tlog.error("Cannot generate flips. Duration: {0} FlipNum:{1}".format(duration, flip_num))
return False
fps = 'fps=1/' + str(interval)
global THUMB_SIZE
flip_path = os.path.join(flip_path, str(video_id))
for _ in range(FLIP_NUM+3):
flip_file = "{0}-{1}.png".format(flip_path, _)
if os.path.isfile(flip_file):
os.remove(flip_file)
flip_path_template = flip_path + '-%d.png'
cmd = ['ffmpeg', '-i', video_path, '-vf', fps, '-s', THUMB_SIZE, flip_path_template]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
def gen_cover(video_path, cover_path):
if not G_GEN_IMAGE:
return True
if os.path.isfile(cover_path):
os.remove(cover_path)
cmd = ['ffmpeg', '-itsoffset', '-1', '-i', video_path, '-vframes', '1', '-f', 'apng', cover_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Convert video to mp4
def | (video_path, dest_path):
tlog = get_logger(current_thread().name)
if os.path.isfile(dest_path):
tlog.info('#Already converted, skip: {0}'.format(dest_path))
return True
tlog.info('#Converting: {0} => {1}\n', video_path, dest_path)
cmd = ['ffmpeg', '-i', video_path, '-vcodec', 'h264', '-acodec', 'aac', dest_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Search the duration from given text
def search_duration_from_text(text):
# Match pattern like Duration: 00:24:14.91, s
regExp = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})')
result = regExp.search(text, re.M | re.U)
if result is not None:
(hour, min, sec) = result.groups()
duration = int(hour) * 3600 + int(min) * 60 + int(sec)
return duration
return None
| convert_video_to_mp4 | identifier_name |
import_videos.py | # coding=utf8
# encoding: utf-8
import os
import platform
import re
import signal
import sys
import traceback
from subprocess import Popen, PIPE
from threading import Thread, current_thread
from Queue import Queue
from util.log import get_logger, log
from video.models import Video, KeywordVideoId
from django.db.models import Max
from collect_video import G_GEN_IMAGE
MAX_THREAD_NUM = 4
THREAD_STOP_FLAGS = []
THUMB_DIR = './static/thumb'
THUMB_SIZE = '180x135'
COVER_DIR = './static/cover'
FLIP_DIR = './static/flip'
FLIP_NUM = 10
task_queue = Queue(maxsize=2000)
def register_int_signal_handler():
def stop_thread_handler(signum, frame):
log.info("Received signal {0}. Will stop all task threads".format(signum))
for _ in range(len(THREAD_STOP_FLAGS)):
THREAD_STOP_FLAGS[_] = True
if platform.platform().startswith('Windows'):
signal.signal(signal.CTRL_C_EVENT, stop_thread_handler)
else:
signal.signal(signal.SIGINT, stop_thread_handler)
def next_video_id(current, path):
existing = Video.objects.filter(path=path)
if existing:
return existing[0].video_id, current
current += 1
return current, current
def create_task_list(path_list):
"""
Walks path recursively, and create a task list
:param path_list: a list of (path, rating)
:return: a list of ImportTask objects
"""
current_video_id = Video.objects.all().aggregate(Max('video_id'))['video_id__max']
if not current_video_id:
current_video_id = 0
task_list = []
for (path, rating) in path_list:
base_path = os.path.split(path)[0]
if os.path.isfile(path):
file_name = os.path.basename(path)
if is_valid_video_file(path, file_name):
video_id, current_video_id = next_video_id(current_video_id, path)
task_list.append(ImportTask(video_id, base_path, path, rating))
continue
for (root, dirs, files) in os.walk(path):
for file_name in files:
try:
file_path = os.path.join(root, file_name)
if os.path.isdir(file_path):
continue
if is_valid_video_file(file_path, file_name):
video_id, current_video_id = next_video_id(current_video_id, file_path)
task_list.append(ImportTask(video_id, base_path, file_path, rating))
except:
log.error('#Error while proceeding: {0}'.format(file_name))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
return task_list
def start_tasks(task_list):
global task_queue
for task in task_list:
task_queue.put(task)
if not THREAD_STOP_FLAGS:
for _ in range(MAX_THREAD_NUM):
THREAD_STOP_FLAGS.append(True)
if not os.path.isdir(COVER_DIR):
os.mkdir(COVER_DIR)
if not os.path.isdir(THUMB_DIR):
os.mkdir(THUMB_DIR)
if not os.path.isdir(FLIP_DIR):
os.mkdir(FLIP_DIR)
for _ in range(MAX_THREAD_NUM):
if THREAD_STOP_FLAGS[_]:
t = Thread(target=import_worker, kwargs={'thread_index': _})
t.name = str(_)
t.daemon = False
t.start()
task_queue.join()
def add_keywords_to_db(task_list):
blacklist = load_keyword_blacklist_from_file()
for task in task_list:
base_path = task.base_path
file_path = task.file_path
video_id = task.video_id
keywords = get_keywords(base_path, file_path, blacklist)
log.info('#Keywords:'.format(keywords))
for key in keywords:
try:
if KeywordVideoId.objects.filter(keyword=key, video_id=video_id):
log.info("Existing keyword {0} for {1}".format(key, video_id))
continue
keyword_record = KeywordVideoId()
keyword_record.keyword = key
keyword_record.video = Video.objects.get(video_id=video_id)
keyword_record.save()
log.info('#Added keyword:{0} for video_id: {1}'.format(key, video_id))
except Exception as e:
log.error("Error while adding keyword {0} to video {1}: {2}".format(key, video_id, e))
class ImportTask(object):
def __init__(self, video_id, base_path, path, rating=Video.P):
"""
Create an import task object.
:param video_id: a pre-allocated video_id in number, so we don't need to lock db in multiple thread.
:param base_path: path prefix that will be ignored when creating keywords from path.
:param path: path of the file
:param rating: rating of the video, highest by default.
"""
self.video_id = video_id
self.base_path = base_path
self.file_path = path
self.rating = rating
def import_worker(thread_index):
"""
Thread worker that deals with tasks.
:return:
"""
THREAD_STOP_FLAGS[thread_index] = False
while not (THREAD_STOP_FLAGS[thread_index] or task_queue.empty()):
task = task_queue.get()
do_import_video_task(task)
task_queue.task_done()
THREAD_STOP_FLAGS[thread_index] = True
def do_import_video_task(task):
video_id = task.video_id
file_path = task.file_path
rating = task.rating
file_name = os.path.basename(file_path)[:-4]
tlog = get_logger(current_thread().name)
videos = Video.objects.filter(path=file_path)
if videos:
tlog.info("Existing video: {0}".format(task.file_path))
return
video = Video()
video.video_id = video_id
video.rating = rating
thumb_path = get_thumb_path(video.video_id)
cover_path = get_cover_path(video.video_id)
if not gen_cover(task.file_path, cover_path):
tlog.error("Failed to gen cover for {0}".format(file_path))
return
success, duration = gen_thumb(file_path, thumb_path)
if success:
if not gen_flips(file_path, video.video_id, duration, FLIP_DIR, FLIP_NUM):
tlog.error("Failed to gen flips for {0}".format(file_path))
else:
tlog.error("Failed to gen thumb for {0}".format(file_path))
video.title = file_name
video.path = file_path
video.duration = duration
video.save()
tlog.info('#Video: {0} [{1}] {2}'.format(video.title, video.duration, video.path))
def is_valid_video_file(file_path, file_name):
# skip hidden files (possibly not valid video files)
if file_name.startswith('.') or (not file_name.endswith('.mp4')):
return False
if os.path.getsize(file_path) == 0:
log.info('Remove invalid video file: {0}'.format(file_path))
os.remove(file_path)
return False
return True
def load_keyword_blacklist_from_file():
blacklist = set()
keyword_file = 'keywords.blacklist'
try:
with open(keyword_file, 'r') as kfp:
for line in kfp:
line = line.strip('\n')
if line:
blacklist.add(line)
log.info("Keywords blacklist: {0}".format(blacklist))
except Exception as e:
log.error("Error while processing {0}:{1}".format(keyword_file, e))
return blacklist
def get_keywords(prefix, file_path, blacklist):
"""
Get keywords from file path
:param prefix: Prefix of the dir path, so we can ignore them
:param file_path: full path of the video file
:param blacklist: A set of words/symbols that should be ignored
:return: a list of keywords
"""
file_path = str(file_path).replace(prefix, '') # remove base_dir from file_path
file_path = os.path.splitext(file_path)[0] # Only keep the part without extension
file_path = str(file_path).lower()
for bad_keyword in blacklist:
file_path = file_path.replace(bad_keyword, ' ')
file_path = re.sub(r'\s+', ' ', file_path) # Replace multiple spaces to single one
keywords = file_path.split(' ')
keywords = [k for k in keywords if k]
return keywords
class KeywordDictDataObj(object):
def __init__(self):
self.count = 0
self.files = set()
def get_thumb_path(fn):
return './static/thumb/' + str(fn) + '.png'
def get_cover_path(fn):
|
def gen_thumb(video_path, thumb_path):
"""
Generate thumb image for the given video, and grabs duration from output
:return: (success, duration)
"""
if os.path.isfile(thumb_path):
os.remove(thumb_path)
global THUMB_SIZE
cmd = ['ffmpeg', '-itsoffset', '-5', '-i', video_path, '-vframes', '1', '-f', 'apng', '-s', THUMB_SIZE, thumb_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = p.communicate()[1]
duration = search_duration_from_text(output)
if not duration:
tlog = get_logger(current_thread().name)
tlog.error("Failed to find duration for {0}".format(video_path))
duration = 0
return p.returncode == 0, duration
def gen_flips(video_path, video_id, duration, flip_path, flip_num):
"""
Generate flips for the given video
:param video_path: path of the video
:param video_id: id of the file
:param duration: duration of video in seconds
:param flip_path: path dir to put the flips
:param flip_num: number of flips to generate
:return: True on success, False otherwise
"""
if not G_GEN_IMAGE:
return True
duration = float(duration)
flip_num = float(flip_num)
interval = duration / flip_num
if interval <= 0.0:
tlog = get_logger(current_thread().name)
tlog.error("Cannot generate flips. Duration: {0} FlipNum:{1}".format(duration, flip_num))
return False
fps = 'fps=1/' + str(interval)
global THUMB_SIZE
flip_path = os.path.join(flip_path, str(video_id))
for _ in range(FLIP_NUM+3):
flip_file = "{0}-{1}.png".format(flip_path, _)
if os.path.isfile(flip_file):
os.remove(flip_file)
flip_path_template = flip_path + '-%d.png'
cmd = ['ffmpeg', '-i', video_path, '-vf', fps, '-s', THUMB_SIZE, flip_path_template]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
def gen_cover(video_path, cover_path):
if not G_GEN_IMAGE:
return True
if os.path.isfile(cover_path):
os.remove(cover_path)
cmd = ['ffmpeg', '-itsoffset', '-1', '-i', video_path, '-vframes', '1', '-f', 'apng', cover_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Convert video to mp4
def convert_video_to_mp4(video_path, dest_path):
tlog = get_logger(current_thread().name)
if os.path.isfile(dest_path):
tlog.info('#Already converted, skip: {0}'.format(dest_path))
return True
tlog.info('#Converting: {0} => {1}\n', video_path, dest_path)
cmd = ['ffmpeg', '-i', video_path, '-vcodec', 'h264', '-acodec', 'aac', dest_path]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
# Search the duration from given text
def search_duration_from_text(text):
# Match pattern like Duration: 00:24:14.91, s
regExp = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})')
result = regExp.search(text, re.M | re.U)
if result is not None:
(hour, min, sec) = result.groups()
duration = int(hour) * 3600 + int(min) * 60 + int(sec)
return duration
return None
| return './static/cover/' + str(fn) + '.png' | identifier_body |
DoJetFakeFactors.py | """
Interactive script to plot data-MC histograms out of a set of trees.
"""
# Parse command-line options
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('--baseDir', default=None, dest='baseDir', help='Path to base directory containing all ntuples')
p.add_argument('--baseDirModel', default=None, dest='baseDirModel', help='Path to base directory containing all ntuples for the model')
p.add_argument('--fileName', default='ntuple.root', dest='fileName', help='( Default ntuple.root ) Name of files')
p.add_argument('--treeName', default='events' , dest='treeName', help='( Default events ) Name tree in root file')
p.add_argument('--treeNameModel', default='photons' , dest='treeNameModel',help='( Default photons ) Name tree in root file')
p.add_argument('--samplesConf', default=None, dest='samplesConf', help=('Use alternate sample configuration. '
'Must be a python file that implements the configuration '
'in the same manner as in the main() of this script. If only '
'the file name is given it is assumed to be in the same directory '
'as this script, if a path is given, use that path' ) )
p.add_argument('--xsFile', default=None, type=str , dest='xsFile', help='path to cross section file. When calling AddSample in the configuration module, set useXSFile=True to get weights from the provided file')
p.add_argument('--lumi', default=None, type=float , dest='lumi', help='Integrated luminosity (to use with xsFile)')
p.add_argument('--mcweight', default=None, type=float , dest='mcweight', help='Weight to apply to MC samples')
p.add_argument('--outputDir', default=None, type=str , dest='outputDir', help='output directory for histograms')
p.add_argument('--readHists', default=False,action='store_true', dest='readHists', help='read histograms from root files instead of trees')
p.add_argument('--quiet', default=False,action='store_true', dest='quiet', help='disable information messages')
options = p.parse_args()
import sys
import os
import re
import math
import uuid
import copy
import imp
import ROOT
from array import array
import random
from SampleManager import SampleManager
from SampleManager import Sample
ROOT.gROOT.SetBatch(False)
samplesFF = None
samplesData = None
def main() :
global samplesFF
global samplesData
if not options.baseDir.count('/eos/') and not os.path.isdir( options.baseDir ) :
print 'baseDir not found!'
return
samplesFF = SampleManager(options.baseDir, options.treeName, mcweight=options.mcweight, treeNameModel=options.treeNameModel, filename=options.fileName, base_path_model=options.baseDirModel, xsFile=options.xsFile, lumi=options.lumi, readHists=options.readHists, quiet=options.quiet)
base_dir_data = '/afs/cern.ch/work/j/jkunkle/private/CMS/Wgamgam/Output/LepGammaGammaNoPhID_2014_10_29'
samplesData = SampleManager(base_dir_data, options.treeName,filename=options.fileName, xsFile=options.xsFile, lumi=options.lumi, quiet=options.quiet)
if options.samplesConf is not None :
samplesFF.ReadSamples( options.samplesConf )
samplesData.ReadSamples( options.samplesConf )
#cuts_den = '!ph_passChIsoCorrMedium[0] && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && ph_sigmaIEIE[0]>%f && ph_sigmaIEIE[0] < %f '
#cuts_num = 'ph_passChIsoCorrMedium[0] && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && %s && ph_passSIEIEMedium[0] ' %(ec)
loose_cuts = (12, 9, 9)
#loose_cuts = (20, 15, 15)
#loose_cuts = (1000000,1000000,1000000)
cuts_den = 'ph_hasPixSeed[0]==0 && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && !ph_passSIEIEMedium[0] && !ph_passChIsoCorrMedium[0] && ph_chIsoCorr[0] < %d && ph_neuIsoCorr[0] < %d && ph_phoIsoCorr[0] < %d ' %(loose_cuts[0], loose_cuts[1], loose_cuts[2] )
cuts_num = 'ph_hasPixSeed[0]==0 && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && ph_passSIEIEMedium[0] && ph_passChIsoCorrMedium[0] && ph_HoverE12[0] < 0.05'
regions = ['EB', 'EE']
ptbins = [ 15, 25, 40, 70, 1000000 ]
fake_factors = GetFakeFactors(cuts_den, cuts_num, regions, ptbins)
#for key, val in fake_factors.iteritems() :
# print 'Fake factors for %s' %key
# val.Draw()
# raw_input('continue')
#ApplySinglePhotonFF( cuts_den, sieie_cuts, eta_cuts, ptbins, fake_factors )
gg_regions = [ ('EB', 'EB') ]
ApplyDiPhotonFF( loose_cuts, gg_regions, ptbins, fake_factors )
def ApplyDiPhotonFF( loose_cuts, regions, ptbins, fake_factors) :
|
def ApplySinglePhotonFF( cut_str, cut_vals, eta_cuts, ptbins, fake_factors) :
labels = cut_vals.keys()
samp = samplesData.get_samples(name='Data')
sampEval = samplesData.get_samples(name='WjetsZjets')
for lab in labels :
ec = eta_cuts[lab]
vals = cut_vals[lab]
cuts_den = cut_str%vals
cuts_den += ' && ' + ec
den_base = ' mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s ' %cuts_den
#generate weighting string
weight_str = ''
var = 'ph_pt[0]'
for idx, min in enumerate(ptbins[:-1]) :
max = ptbins[idx+1]
ff = fake_factors[lab].GetBinContent( fake_factors[lab].FindBin( min ) )
weight_str += ' %f * ( %s > %f && %s <= %f ) +' %( ff, var, min, var, max )
weight_str = weight_str.rstrip(' ').rstrip('+')
tot_str = ' ( %s ) * ( %s ) ' %( weight_str, den_base )
print tot_str
binning = ( 100, 0, 500 )
ddhist = None
evalhist = None
if samp:
samples.create_hist( samp[0], var, tot_str , binning )
ddhist = samp[0].hist.Clone('ddhist')
if sampEval :
samples.create_hist( sampEval[0], var, 'PUWeight * ( mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_passMedium[0] && %s)'%ec , binning )
evalhist = sampEval[0].hist.Clone( 'evalHist' )
evalhist.SetMarkerColor( ROOT.kRed )
evalhist.SetLineColor( ROOT.kRed )
ddhist.Draw()
evalhist.Draw('same')
raw_input('contin')
def GetFakeFactors(cut_den_base, cut_num_base, regions, ptbins) :
binning = ( 500, 0, 500 )
den_sample = 'MuonRealPhotonZgSub'
samp = samplesFF.get_samples(name=den_sample)
fake_factors = {}
output = {}
for reg in regions :
cuts_den = cut_den_base
cuts_den += ' && ph_Is%s[0]' %reg
cuts_num = cut_num_base
cuts_num += ' && ph_Is%s[0]' %reg
full_den_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_den
full_num_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_num
#generate histograms
den_hist = None
num_hist = None
var = 'ph_pt[0]'
if samp:
samplesFF.create_hist( samp[0], var, full_den_cuts, binning )
den_hist = samp[0].hist.Clone( 'den_hist' )
samplesFF.create_hist( samp[0], var, full_num_cuts, binning )
num_hist = samp[0].hist.Clone( 'num_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin = ( reg, min, max )
num_count = num_hist.Integral( num_hist.FindBin( min), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( min), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
print 'Pt bins %f - %f, N num = %f, N den = %f, fake factor = %f ' %( min, max, num_count, den_count, factor )
output[bin] = factor
for idx, max in enumerate( ptbins[1:] ) :
bin = (reg, 15, max )
if bin in output :
continue
num_count = num_hist.Integral( num_hist.FindBin( 15), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( 15), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
output[bin] = factor
return output
main()
| draw_cmd_base = 'mu_passtrig25_n>0 && mu_n==1 && ph_n > 1 && dr_ph1_ph2 > 0.3 && m_ph1_ph2>15 && dr_ph1_leadLep>0.4 && dr_ph2_leadLep>0.4 && ph_hasPixSeed[0]==0 && ph_hasPixSeed[1]==0 '
samp = samplesData.get_samples(name='Muon')
for r1,r2 in regions :
draw_cmd = draw_cmd_base + ' && is%s_leadph12 && is%s_sublph12' %( r1,r2)
draw_cmd_LL = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2], loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_TL = '%s && chIsoCorr_leadph12 < 1.5 && neuIsoCorr_leadph12 < 1.0 && phoIsoCorr_leadph12 < 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 < 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_LT = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 < 1.5 && neuIsoCorr_sublph12 < 1.0 && phoIsoCorr_sublph12 < 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 < 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
var = 'pt_leadph12'
samplesData.create_hist( samp[0], var, draw_cmd_LL, (100, 0, 500 ) )
LL_hist = samp[0].hist.Clone( 'll_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_TL, (100, 0, 500 ) )
TL_hist = samp[0].hist.Clone( 'tl_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_LT, (100, 0, 500 ) )
LT_hist = samp[0].hist.Clone( 'lt_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin_lead = ( r1, min, max )
bin_subl = ( r2, 15, max )
LL_count = LL_hist.Integral( LL_hist.FindBin( min ), LL_hist.FindBin( max ) - 1 )
LT_count = LT_hist.Integral( LT_hist.FindBin( min ), LT_hist.FindBin( max ) - 1 )
TL_count = TL_hist.Integral( TL_hist.FindBin( min ), TL_hist.FindBin( max ) - 1 )
ff_lead = fake_factors[bin_lead]
ff_subl = fake_factors[bin_subl]
N_FF_TL = LL_count*ff_lead
N_FF_LT = LL_count*ff_subl
N_RF_TL = TL_count - N_FF_TL
N_FR_LT = LT_count - N_FF_LT
N_RF_TT = N_RF_TL*ff_subl
N_FR_TT = N_FR_LT*ff_lead
N_FF_TT = LL_count*ff_lead*ff_subl
print 'Lead pt = %d - %d, subl pt = %d - %d' %( min, max, 15, max )
print 'N_LL = ', LL_count
print 'N_LT = ', LT_count
print 'N_TL = ', TL_count
print 'N_FF_TL = ', N_FF_TL
print 'N_FF_LT = ', N_FF_LT
print 'N_RF_TL = ', N_RF_TL
print 'N_FR_LT = ', N_FR_LT
print 'N_RF_TT = ', N_RF_TT
print 'N_FR_TT = ', N_FR_TT
print 'N_FF_TT = ', N_FF_TT
print 'Sum = ', (N_RF_TT+N_FR_TT+N_FF_TT) | identifier_body |
DoJetFakeFactors.py | """
Interactive script to plot data-MC histograms out of a set of trees.
"""
# Parse command-line options
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('--baseDir', default=None, dest='baseDir', help='Path to base directory containing all ntuples')
p.add_argument('--baseDirModel', default=None, dest='baseDirModel', help='Path to base directory containing all ntuples for the model')
p.add_argument('--fileName', default='ntuple.root', dest='fileName', help='( Default ntuple.root ) Name of files')
p.add_argument('--treeName', default='events' , dest='treeName', help='( Default events ) Name tree in root file')
p.add_argument('--treeNameModel', default='photons' , dest='treeNameModel',help='( Default photons ) Name tree in root file')
p.add_argument('--samplesConf', default=None, dest='samplesConf', help=('Use alternate sample configuration. '
'Must be a python file that implements the configuration '
'in the same manner as in the main() of this script. If only '
'the file name is given it is assumed to be in the same directory '
'as this script, if a path is given, use that path' ) )
p.add_argument('--xsFile', default=None, type=str , dest='xsFile', help='path to cross section file. When calling AddSample in the configuration module, set useXSFile=True to get weights from the provided file')
p.add_argument('--lumi', default=None, type=float , dest='lumi', help='Integrated luminosity (to use with xsFile)')
p.add_argument('--mcweight', default=None, type=float , dest='mcweight', help='Weight to apply to MC samples')
p.add_argument('--outputDir', default=None, type=str , dest='outputDir', help='output directory for histograms')
p.add_argument('--readHists', default=False,action='store_true', dest='readHists', help='read histograms from root files instead of trees')
p.add_argument('--quiet', default=False,action='store_true', dest='quiet', help='disable information messages')
options = p.parse_args()
import sys
import os
import re
import math
import uuid
import copy
import imp
import ROOT
from array import array
import random
from SampleManager import SampleManager
from SampleManager import Sample
ROOT.gROOT.SetBatch(False)
samplesFF = None
samplesData = None
def main() :
global samplesFF
global samplesData
if not options.baseDir.count('/eos/') and not os.path.isdir( options.baseDir ) :
print 'baseDir not found!'
return
samplesFF = SampleManager(options.baseDir, options.treeName, mcweight=options.mcweight, treeNameModel=options.treeNameModel, filename=options.fileName, base_path_model=options.baseDirModel, xsFile=options.xsFile, lumi=options.lumi, readHists=options.readHists, quiet=options.quiet)
base_dir_data = '/afs/cern.ch/work/j/jkunkle/private/CMS/Wgamgam/Output/LepGammaGammaNoPhID_2014_10_29'
samplesData = SampleManager(base_dir_data, options.treeName,filename=options.fileName, xsFile=options.xsFile, lumi=options.lumi, quiet=options.quiet)
if options.samplesConf is not None :
samplesFF.ReadSamples( options.samplesConf )
samplesData.ReadSamples( options.samplesConf )
#cuts_den = '!ph_passChIsoCorrMedium[0] && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && ph_sigmaIEIE[0]>%f && ph_sigmaIEIE[0] < %f '
#cuts_num = 'ph_passChIsoCorrMedium[0] && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && %s && ph_passSIEIEMedium[0] ' %(ec)
loose_cuts = (12, 9, 9)
#loose_cuts = (20, 15, 15)
#loose_cuts = (1000000,1000000,1000000)
cuts_den = 'ph_hasPixSeed[0]==0 && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && !ph_passSIEIEMedium[0] && !ph_passChIsoCorrMedium[0] && ph_chIsoCorr[0] < %d && ph_neuIsoCorr[0] < %d && ph_phoIsoCorr[0] < %d ' %(loose_cuts[0], loose_cuts[1], loose_cuts[2] )
cuts_num = 'ph_hasPixSeed[0]==0 && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && ph_passSIEIEMedium[0] && ph_passChIsoCorrMedium[0] && ph_HoverE12[0] < 0.05'
regions = ['EB', 'EE']
ptbins = [ 15, 25, 40, 70, 1000000 ]
fake_factors = GetFakeFactors(cuts_den, cuts_num, regions, ptbins)
#for key, val in fake_factors.iteritems() :
# print 'Fake factors for %s' %key
# val.Draw()
# raw_input('continue')
#ApplySinglePhotonFF( cuts_den, sieie_cuts, eta_cuts, ptbins, fake_factors )
gg_regions = [ ('EB', 'EB') ]
ApplyDiPhotonFF( loose_cuts, gg_regions, ptbins, fake_factors )
def ApplyDiPhotonFF( loose_cuts, regions, ptbins, fake_factors) :
draw_cmd_base = 'mu_passtrig25_n>0 && mu_n==1 && ph_n > 1 && dr_ph1_ph2 > 0.3 && m_ph1_ph2>15 && dr_ph1_leadLep>0.4 && dr_ph2_leadLep>0.4 && ph_hasPixSeed[0]==0 && ph_hasPixSeed[1]==0 '
samp = samplesData.get_samples(name='Muon')
for r1,r2 in regions :
draw_cmd = draw_cmd_base + ' && is%s_leadph12 && is%s_sublph12' %( r1,r2)
draw_cmd_LL = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2], loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_TL = '%s && chIsoCorr_leadph12 < 1.5 && neuIsoCorr_leadph12 < 1.0 && phoIsoCorr_leadph12 < 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 < 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_LT = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 < 1.5 && neuIsoCorr_sublph12 < 1.0 && phoIsoCorr_sublph12 < 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 < 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
var = 'pt_leadph12'
samplesData.create_hist( samp[0], var, draw_cmd_LL, (100, 0, 500 ) )
LL_hist = samp[0].hist.Clone( 'll_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_TL, (100, 0, 500 ) )
TL_hist = samp[0].hist.Clone( 'tl_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_LT, (100, 0, 500 ) )
LT_hist = samp[0].hist.Clone( 'lt_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin_lead = ( r1, min, max )
bin_subl = ( r2, 15, max )
LL_count = LL_hist.Integral( LL_hist.FindBin( min ), LL_hist.FindBin( max ) - 1 )
LT_count = LT_hist.Integral( LT_hist.FindBin( min ), LT_hist.FindBin( max ) - 1 )
TL_count = TL_hist.Integral( TL_hist.FindBin( min ), TL_hist.FindBin( max ) - 1 )
ff_lead = fake_factors[bin_lead]
ff_subl = fake_factors[bin_subl]
N_FF_TL = LL_count*ff_lead
N_FF_LT = LL_count*ff_subl
N_RF_TL = TL_count - N_FF_TL
N_FR_LT = LT_count - N_FF_LT
N_RF_TT = N_RF_TL*ff_subl
N_FR_TT = N_FR_LT*ff_lead
N_FF_TT = LL_count*ff_lead*ff_subl
print 'Lead pt = %d - %d, subl pt = %d - %d' %( min, max, 15, max )
print 'N_LL = ', LL_count
print 'N_LT = ', LT_count
print 'N_TL = ', TL_count
print 'N_FF_TL = ', N_FF_TL
print 'N_FF_LT = ', N_FF_LT
print 'N_RF_TL = ', N_RF_TL
print 'N_FR_LT = ', N_FR_LT
print 'N_RF_TT = ', N_RF_TT
print 'N_FR_TT = ', N_FR_TT
print 'N_FF_TT = ', N_FF_TT
print 'Sum = ', (N_RF_TT+N_FR_TT+N_FF_TT)
def ApplySinglePhotonFF( cut_str, cut_vals, eta_cuts, ptbins, fake_factors) :
labels = cut_vals.keys()
samp = samplesData.get_samples(name='Data')
sampEval = samplesData.get_samples(name='WjetsZjets')
for lab in labels :
ec = eta_cuts[lab]
vals = cut_vals[lab]
cuts_den = cut_str%vals
cuts_den += ' && ' + ec
den_base = ' mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s ' %cuts_den
#generate weighting string
weight_str = ''
var = 'ph_pt[0]'
for idx, min in enumerate(ptbins[:-1]) :
max = ptbins[idx+1]
ff = fake_factors[lab].GetBinContent( fake_factors[lab].FindBin( min ) )
weight_str += ' %f * ( %s > %f && %s <= %f ) +' %( ff, var, min, var, max )
weight_str = weight_str.rstrip(' ').rstrip('+')
tot_str = ' ( %s ) * ( %s ) ' %( weight_str, den_base )
print tot_str
binning = ( 100, 0, 500 )
ddhist = None
evalhist = None
if samp:
samples.create_hist( samp[0], var, tot_str , binning )
ddhist = samp[0].hist.Clone('ddhist')
if sampEval :
samples.create_hist( sampEval[0], var, 'PUWeight * ( mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_passMedium[0] && %s)'%ec , binning )
evalhist = sampEval[0].hist.Clone( 'evalHist' )
evalhist.SetMarkerColor( ROOT.kRed )
evalhist.SetLineColor( ROOT.kRed )
ddhist.Draw()
evalhist.Draw('same')
raw_input('contin')
def GetFakeFactors(cut_den_base, cut_num_base, regions, ptbins) :
binning = ( 500, 0, 500 )
den_sample = 'MuonRealPhotonZgSub'
samp = samplesFF.get_samples(name=den_sample)
fake_factors = {}
output = {}
for reg in regions :
cuts_den = cut_den_base
cuts_den += ' && ph_Is%s[0]' %reg
cuts_num = cut_num_base
cuts_num += ' && ph_Is%s[0]' %reg
full_den_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_den
full_num_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_num
#generate histograms
den_hist = None
num_hist = None
var = 'ph_pt[0]'
if samp:
samplesFF.create_hist( samp[0], var, full_den_cuts, binning )
den_hist = samp[0].hist.Clone( 'den_hist' )
samplesFF.create_hist( samp[0], var, full_num_cuts, binning )
num_hist = samp[0].hist.Clone( 'num_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin = ( reg, min, max )
num_count = num_hist.Integral( num_hist.FindBin( min), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( min), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
print 'Pt bins %f - %f, N num = %f, N den = %f, fake factor = %f ' %( min, max, num_count, den_count, factor )
output[bin] = factor
for idx, max in enumerate( ptbins[1:] ) :
|
return output
main()
| bin = (reg, 15, max )
if bin in output :
continue
num_count = num_hist.Integral( num_hist.FindBin( 15), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( 15), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
output[bin] = factor | conditional_block |
DoJetFakeFactors.py | """
Interactive script to plot data-MC histograms out of a set of trees.
"""
# Parse command-line options
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('--baseDir', default=None, dest='baseDir', help='Path to base directory containing all ntuples')
p.add_argument('--baseDirModel', default=None, dest='baseDirModel', help='Path to base directory containing all ntuples for the model')
p.add_argument('--fileName', default='ntuple.root', dest='fileName', help='( Default ntuple.root ) Name of files')
p.add_argument('--treeName', default='events' , dest='treeName', help='( Default events ) Name tree in root file')
p.add_argument('--treeNameModel', default='photons' , dest='treeNameModel',help='( Default photons ) Name tree in root file')
p.add_argument('--samplesConf', default=None, dest='samplesConf', help=('Use alternate sample configuration. '
'Must be a python file that implements the configuration '
'in the same manner as in the main() of this script. If only '
'the file name is given it is assumed to be in the same directory '
'as this script, if a path is given, use that path' ) )
p.add_argument('--xsFile', default=None, type=str , dest='xsFile', help='path to cross section file. When calling AddSample in the configuration module, set useXSFile=True to get weights from the provided file')
p.add_argument('--lumi', default=None, type=float , dest='lumi', help='Integrated luminosity (to use with xsFile)')
p.add_argument('--mcweight', default=None, type=float , dest='mcweight', help='Weight to apply to MC samples')
p.add_argument('--outputDir', default=None, type=str , dest='outputDir', help='output directory for histograms')
p.add_argument('--readHists', default=False,action='store_true', dest='readHists', help='read histograms from root files instead of trees')
p.add_argument('--quiet', default=False,action='store_true', dest='quiet', help='disable information messages')
options = p.parse_args()
import sys
import os
import re
import math
import uuid
import copy
import imp
import ROOT
from array import array
import random
from SampleManager import SampleManager
from SampleManager import Sample
ROOT.gROOT.SetBatch(False)
samplesFF = None
samplesData = None
def main() :
global samplesFF
global samplesData
if not options.baseDir.count('/eos/') and not os.path.isdir( options.baseDir ) :
print 'baseDir not found!'
return
samplesFF = SampleManager(options.baseDir, options.treeName, mcweight=options.mcweight, treeNameModel=options.treeNameModel, filename=options.fileName, base_path_model=options.baseDirModel, xsFile=options.xsFile, lumi=options.lumi, readHists=options.readHists, quiet=options.quiet)
base_dir_data = '/afs/cern.ch/work/j/jkunkle/private/CMS/Wgamgam/Output/LepGammaGammaNoPhID_2014_10_29'
samplesData = SampleManager(base_dir_data, options.treeName,filename=options.fileName, xsFile=options.xsFile, lumi=options.lumi, quiet=options.quiet)
if options.samplesConf is not None :
samplesFF.ReadSamples( options.samplesConf )
samplesData.ReadSamples( options.samplesConf )
#cuts_den = '!ph_passChIsoCorrMedium[0] && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && ph_sigmaIEIE[0]>%f && ph_sigmaIEIE[0] < %f '
#cuts_num = 'ph_passChIsoCorrMedium[0] && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && %s && ph_passSIEIEMedium[0] ' %(ec)
loose_cuts = (12, 9, 9)
#loose_cuts = (20, 15, 15)
#loose_cuts = (1000000,1000000,1000000)
cuts_den = 'ph_hasPixSeed[0]==0 && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && !ph_passSIEIEMedium[0] && !ph_passChIsoCorrMedium[0] && ph_chIsoCorr[0] < %d && ph_neuIsoCorr[0] < %d && ph_phoIsoCorr[0] < %d ' %(loose_cuts[0], loose_cuts[1], loose_cuts[2] )
cuts_num = 'ph_hasPixSeed[0]==0 && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && ph_passSIEIEMedium[0] && ph_passChIsoCorrMedium[0] && ph_HoverE12[0] < 0.05'
regions = ['EB', 'EE']
ptbins = [ 15, 25, 40, 70, 1000000 ]
fake_factors = GetFakeFactors(cuts_den, cuts_num, regions, ptbins)
#for key, val in fake_factors.iteritems() :
# print 'Fake factors for %s' %key
# val.Draw()
# raw_input('continue')
#ApplySinglePhotonFF( cuts_den, sieie_cuts, eta_cuts, ptbins, fake_factors )
gg_regions = [ ('EB', 'EB') ]
ApplyDiPhotonFF( loose_cuts, gg_regions, ptbins, fake_factors )
def ApplyDiPhotonFF( loose_cuts, regions, ptbins, fake_factors) :
draw_cmd_base = 'mu_passtrig25_n>0 && mu_n==1 && ph_n > 1 && dr_ph1_ph2 > 0.3 && m_ph1_ph2>15 && dr_ph1_leadLep>0.4 && dr_ph2_leadLep>0.4 && ph_hasPixSeed[0]==0 && ph_hasPixSeed[1]==0 '
samp = samplesData.get_samples(name='Muon')
for r1,r2 in regions :
draw_cmd = draw_cmd_base + ' && is%s_leadph12 && is%s_sublph12' %( r1,r2)
draw_cmd_LL = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2], loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_TL = '%s && chIsoCorr_leadph12 < 1.5 && neuIsoCorr_leadph12 < 1.0 && phoIsoCorr_leadph12 < 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 < 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_LT = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 < 1.5 && neuIsoCorr_sublph12 < 1.0 && phoIsoCorr_sublph12 < 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 < 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
var = 'pt_leadph12'
samplesData.create_hist( samp[0], var, draw_cmd_LL, (100, 0, 500 ) )
LL_hist = samp[0].hist.Clone( 'll_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_TL, (100, 0, 500 ) )
TL_hist = samp[0].hist.Clone( 'tl_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_LT, (100, 0, 500 ) )
LT_hist = samp[0].hist.Clone( 'lt_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin_lead = ( r1, min, max )
bin_subl = ( r2, 15, max )
LL_count = LL_hist.Integral( LL_hist.FindBin( min ), LL_hist.FindBin( max ) - 1 )
LT_count = LT_hist.Integral( LT_hist.FindBin( min ), LT_hist.FindBin( max ) - 1 )
TL_count = TL_hist.Integral( TL_hist.FindBin( min ), TL_hist.FindBin( max ) - 1 )
ff_lead = fake_factors[bin_lead]
ff_subl = fake_factors[bin_subl]
N_FF_TL = LL_count*ff_lead
N_FF_LT = LL_count*ff_subl
N_RF_TL = TL_count - N_FF_TL
N_FR_LT = LT_count - N_FF_LT
N_RF_TT = N_RF_TL*ff_subl
N_FR_TT = N_FR_LT*ff_lead
N_FF_TT = LL_count*ff_lead*ff_subl
print 'Lead pt = %d - %d, subl pt = %d - %d' %( min, max, 15, max )
print 'N_LL = ', LL_count
print 'N_LT = ', LT_count
print 'N_TL = ', TL_count
print 'N_FF_TL = ', N_FF_TL
print 'N_FF_LT = ', N_FF_LT
print 'N_RF_TL = ', N_RF_TL
print 'N_FR_LT = ', N_FR_LT
print 'N_RF_TT = ', N_RF_TT
print 'N_FR_TT = ', N_FR_TT
print 'N_FF_TT = ', N_FF_TT
print 'Sum = ', (N_RF_TT+N_FR_TT+N_FF_TT)
def | ( cut_str, cut_vals, eta_cuts, ptbins, fake_factors) :
labels = cut_vals.keys()
samp = samplesData.get_samples(name='Data')
sampEval = samplesData.get_samples(name='WjetsZjets')
for lab in labels :
ec = eta_cuts[lab]
vals = cut_vals[lab]
cuts_den = cut_str%vals
cuts_den += ' && ' + ec
den_base = ' mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s ' %cuts_den
#generate weighting string
weight_str = ''
var = 'ph_pt[0]'
for idx, min in enumerate(ptbins[:-1]) :
max = ptbins[idx+1]
ff = fake_factors[lab].GetBinContent( fake_factors[lab].FindBin( min ) )
weight_str += ' %f * ( %s > %f && %s <= %f ) +' %( ff, var, min, var, max )
weight_str = weight_str.rstrip(' ').rstrip('+')
tot_str = ' ( %s ) * ( %s ) ' %( weight_str, den_base )
print tot_str
binning = ( 100, 0, 500 )
ddhist = None
evalhist = None
if samp:
samples.create_hist( samp[0], var, tot_str , binning )
ddhist = samp[0].hist.Clone('ddhist')
if sampEval :
samples.create_hist( sampEval[0], var, 'PUWeight * ( mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_passMedium[0] && %s)'%ec , binning )
evalhist = sampEval[0].hist.Clone( 'evalHist' )
evalhist.SetMarkerColor( ROOT.kRed )
evalhist.SetLineColor( ROOT.kRed )
ddhist.Draw()
evalhist.Draw('same')
raw_input('contin')
def GetFakeFactors(cut_den_base, cut_num_base, regions, ptbins) :
binning = ( 500, 0, 500 )
den_sample = 'MuonRealPhotonZgSub'
samp = samplesFF.get_samples(name=den_sample)
fake_factors = {}
output = {}
for reg in regions :
cuts_den = cut_den_base
cuts_den += ' && ph_Is%s[0]' %reg
cuts_num = cut_num_base
cuts_num += ' && ph_Is%s[0]' %reg
full_den_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_den
full_num_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_num
#generate histograms
den_hist = None
num_hist = None
var = 'ph_pt[0]'
if samp:
samplesFF.create_hist( samp[0], var, full_den_cuts, binning )
den_hist = samp[0].hist.Clone( 'den_hist' )
samplesFF.create_hist( samp[0], var, full_num_cuts, binning )
num_hist = samp[0].hist.Clone( 'num_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin = ( reg, min, max )
num_count = num_hist.Integral( num_hist.FindBin( min), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( min), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
print 'Pt bins %f - %f, N num = %f, N den = %f, fake factor = %f ' %( min, max, num_count, den_count, factor )
output[bin] = factor
for idx, max in enumerate( ptbins[1:] ) :
bin = (reg, 15, max )
if bin in output :
continue
num_count = num_hist.Integral( num_hist.FindBin( 15), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( 15), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
output[bin] = factor
return output
main()
| ApplySinglePhotonFF | identifier_name |
DoJetFakeFactors.py | """
Interactive script to plot data-MC histograms out of a set of trees.
"""
# Parse command-line options
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('--baseDir', default=None, dest='baseDir', help='Path to base directory containing all ntuples')
p.add_argument('--baseDirModel', default=None, dest='baseDirModel', help='Path to base directory containing all ntuples for the model')
p.add_argument('--fileName', default='ntuple.root', dest='fileName', help='( Default ntuple.root ) Name of files')
p.add_argument('--treeName', default='events' , dest='treeName', help='( Default events ) Name tree in root file')
p.add_argument('--treeNameModel', default='photons' , dest='treeNameModel',help='( Default photons ) Name tree in root file')
p.add_argument('--samplesConf', default=None, dest='samplesConf', help=('Use alternate sample configuration. '
'Must be a python file that implements the configuration '
'in the same manner as in the main() of this script. If only '
'the file name is given it is assumed to be in the same directory '
'as this script, if a path is given, use that path' ) )
p.add_argument('--xsFile', default=None, type=str , dest='xsFile', help='path to cross section file. When calling AddSample in the configuration module, set useXSFile=True to get weights from the provided file')
p.add_argument('--lumi', default=None, type=float , dest='lumi', help='Integrated luminosity (to use with xsFile)')
p.add_argument('--mcweight', default=None, type=float , dest='mcweight', help='Weight to apply to MC samples')
p.add_argument('--outputDir', default=None, type=str , dest='outputDir', help='output directory for histograms')
p.add_argument('--readHists', default=False,action='store_true', dest='readHists', help='read histograms from root files instead of trees')
p.add_argument('--quiet', default=False,action='store_true', dest='quiet', help='disable information messages')
options = p.parse_args()
import sys
import os
import re
import math
import uuid
import copy
import imp
import ROOT
from array import array
import random
from SampleManager import SampleManager
from SampleManager import Sample
ROOT.gROOT.SetBatch(False)
samplesFF = None
samplesData = None
def main() :
global samplesFF
global samplesData
if not options.baseDir.count('/eos/') and not os.path.isdir( options.baseDir ) :
print 'baseDir not found!'
return
samplesFF = SampleManager(options.baseDir, options.treeName, mcweight=options.mcweight, treeNameModel=options.treeNameModel, filename=options.fileName, base_path_model=options.baseDirModel, xsFile=options.xsFile, lumi=options.lumi, readHists=options.readHists, quiet=options.quiet)
base_dir_data = '/afs/cern.ch/work/j/jkunkle/private/CMS/Wgamgam/Output/LepGammaGammaNoPhID_2014_10_29'
samplesData = SampleManager(base_dir_data, options.treeName,filename=options.fileName, xsFile=options.xsFile, lumi=options.lumi, quiet=options.quiet)
if options.samplesConf is not None :
samplesFF.ReadSamples( options.samplesConf )
samplesData.ReadSamples( options.samplesConf )
#cuts_den = '!ph_passChIsoCorrMedium[0] && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && ph_sigmaIEIE[0]>%f && ph_sigmaIEIE[0] < %f '
#cuts_num = 'ph_passChIsoCorrMedium[0] && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && %s && ph_passSIEIEMedium[0] ' %(ec)
loose_cuts = (12, 9, 9)
#loose_cuts = (20, 15, 15)
#loose_cuts = (1000000,1000000,1000000)
cuts_den = 'ph_hasPixSeed[0]==0 && !ph_passNeuIsoCorrMedium[0] && !ph_passPhoIsoCorrMedium[0] && !ph_passSIEIEMedium[0] && !ph_passChIsoCorrMedium[0] && ph_chIsoCorr[0] < %d && ph_neuIsoCorr[0] < %d && ph_phoIsoCorr[0] < %d ' %(loose_cuts[0], loose_cuts[1], loose_cuts[2] )
cuts_num = 'ph_hasPixSeed[0]==0 && ph_passNeuIsoCorrMedium[0] && ph_passPhoIsoCorrMedium[0] && ph_passSIEIEMedium[0] && ph_passChIsoCorrMedium[0] && ph_HoverE12[0] < 0.05'
regions = ['EB', 'EE']
ptbins = [ 15, 25, 40, 70, 1000000 ]
fake_factors = GetFakeFactors(cuts_den, cuts_num, regions, ptbins)
#for key, val in fake_factors.iteritems() :
# print 'Fake factors for %s' %key
# val.Draw()
# raw_input('continue')
#ApplySinglePhotonFF( cuts_den, sieie_cuts, eta_cuts, ptbins, fake_factors )
gg_regions = [ ('EB', 'EB') ]
ApplyDiPhotonFF( loose_cuts, gg_regions, ptbins, fake_factors )
def ApplyDiPhotonFF( loose_cuts, regions, ptbins, fake_factors) :
draw_cmd_base = 'mu_passtrig25_n>0 && mu_n==1 && ph_n > 1 && dr_ph1_ph2 > 0.3 && m_ph1_ph2>15 && dr_ph1_leadLep>0.4 && dr_ph2_leadLep>0.4 && ph_hasPixSeed[0]==0 && ph_hasPixSeed[1]==0 '
samp = samplesData.get_samples(name='Muon')
for r1,r2 in regions :
draw_cmd = draw_cmd_base + ' && is%s_leadph12 && is%s_sublph12' %( r1,r2)
draw_cmd_LL = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2], loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_TL = '%s && chIsoCorr_leadph12 < 1.5 && neuIsoCorr_leadph12 < 1.0 && phoIsoCorr_leadph12 < 0.7 && chIsoCorr_sublph12 > 1.5 && neuIsoCorr_sublph12 > 1.0 && phoIsoCorr_sublph12 > 0.7 && chIsoCorr_sublph12 < %d && neuIsoCorr_sublph12 < %d && phoIsoCorr_sublph12 < %d && sieie_leadph12 < 0.011 && sieie_sublph12 > 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
draw_cmd_LT = '%s && chIsoCorr_leadph12 > 1.5 && neuIsoCorr_leadph12 > 1.0 && phoIsoCorr_leadph12 > 0.7 && chIsoCorr_sublph12 < 1.5 && neuIsoCorr_sublph12 < 1.0 && phoIsoCorr_sublph12 < 0.7 && chIsoCorr_leadph12 < %d && neuIsoCorr_leadph12 < %d && phoIsoCorr_leadph12 < %d && sieie_leadph12 > 0.011 && sieie_sublph12 < 0.011' %(draw_cmd, loose_cuts[0], loose_cuts[1], loose_cuts[2])
var = 'pt_leadph12'
samplesData.create_hist( samp[0], var, draw_cmd_LL, (100, 0, 500 ) )
LL_hist = samp[0].hist.Clone( 'll_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_TL, (100, 0, 500 ) )
TL_hist = samp[0].hist.Clone( 'tl_hist' )
samplesData.create_hist( samp[0], var, draw_cmd_LT, (100, 0, 500 ) )
LT_hist = samp[0].hist.Clone( 'lt_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin_lead = ( r1, min, max )
bin_subl = ( r2, 15, max )
LL_count = LL_hist.Integral( LL_hist.FindBin( min ), LL_hist.FindBin( max ) - 1 )
LT_count = LT_hist.Integral( LT_hist.FindBin( min ), LT_hist.FindBin( max ) - 1 )
TL_count = TL_hist.Integral( TL_hist.FindBin( min ), TL_hist.FindBin( max ) - 1 )
ff_lead = fake_factors[bin_lead]
ff_subl = fake_factors[bin_subl]
N_FF_TL = LL_count*ff_lead
N_FF_LT = LL_count*ff_subl
N_RF_TL = TL_count - N_FF_TL
N_FR_LT = LT_count - N_FF_LT
N_RF_TT = N_RF_TL*ff_subl | N_FF_TT = LL_count*ff_lead*ff_subl
print 'Lead pt = %d - %d, subl pt = %d - %d' %( min, max, 15, max )
print 'N_LL = ', LL_count
print 'N_LT = ', LT_count
print 'N_TL = ', TL_count
print 'N_FF_TL = ', N_FF_TL
print 'N_FF_LT = ', N_FF_LT
print 'N_RF_TL = ', N_RF_TL
print 'N_FR_LT = ', N_FR_LT
print 'N_RF_TT = ', N_RF_TT
print 'N_FR_TT = ', N_FR_TT
print 'N_FF_TT = ', N_FF_TT
print 'Sum = ', (N_RF_TT+N_FR_TT+N_FF_TT)
def ApplySinglePhotonFF( cut_str, cut_vals, eta_cuts, ptbins, fake_factors) :
labels = cut_vals.keys()
samp = samplesData.get_samples(name='Data')
sampEval = samplesData.get_samples(name='WjetsZjets')
for lab in labels :
ec = eta_cuts[lab]
vals = cut_vals[lab]
cuts_den = cut_str%vals
cuts_den += ' && ' + ec
den_base = ' mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s ' %cuts_den
#generate weighting string
weight_str = ''
var = 'ph_pt[0]'
for idx, min in enumerate(ptbins[:-1]) :
max = ptbins[idx+1]
ff = fake_factors[lab].GetBinContent( fake_factors[lab].FindBin( min ) )
weight_str += ' %f * ( %s > %f && %s <= %f ) +' %( ff, var, min, var, max )
weight_str = weight_str.rstrip(' ').rstrip('+')
tot_str = ' ( %s ) * ( %s ) ' %( weight_str, den_base )
print tot_str
binning = ( 100, 0, 500 )
ddhist = None
evalhist = None
if samp:
samples.create_hist( samp[0], var, tot_str , binning )
ddhist = samp[0].hist.Clone('ddhist')
if sampEval :
samples.create_hist( sampEval[0], var, 'PUWeight * ( mu_passtrig25_n>0 && mu_n==1 && ph_n==1 && ph_passMedium[0] && %s)'%ec , binning )
evalhist = sampEval[0].hist.Clone( 'evalHist' )
evalhist.SetMarkerColor( ROOT.kRed )
evalhist.SetLineColor( ROOT.kRed )
ddhist.Draw()
evalhist.Draw('same')
raw_input('contin')
def GetFakeFactors(cut_den_base, cut_num_base, regions, ptbins) :
binning = ( 500, 0, 500 )
den_sample = 'MuonRealPhotonZgSub'
samp = samplesFF.get_samples(name=den_sample)
fake_factors = {}
output = {}
for reg in regions :
cuts_den = cut_den_base
cuts_den += ' && ph_Is%s[0]' %reg
cuts_num = cut_num_base
cuts_num += ' && ph_Is%s[0]' %reg
full_den_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_den
full_num_cuts = ' mu_passtrig25_n>0 && mu_n==2 && ph_n==1 && ph_HoverE12[0] < 0.05 && %s && fabs( m_leplep-91.2 ) < 5 && leadPhot_sublLepDR >1 && leadPhot_leadLepDR>1 ' %cuts_num
#generate histograms
den_hist = None
num_hist = None
var = 'ph_pt[0]'
if samp:
samplesFF.create_hist( samp[0], var, full_den_cuts, binning )
den_hist = samp[0].hist.Clone( 'den_hist' )
samplesFF.create_hist( samp[0], var, full_num_cuts, binning )
num_hist = samp[0].hist.Clone( 'num_hist' )
for idx, min in enumerate( ptbins[0:-1] ) :
max = ptbins[idx+1]
bin = ( reg, min, max )
num_count = num_hist.Integral( num_hist.FindBin( min), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( min), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
print 'Pt bins %f - %f, N num = %f, N den = %f, fake factor = %f ' %( min, max, num_count, den_count, factor )
output[bin] = factor
for idx, max in enumerate( ptbins[1:] ) :
bin = (reg, 15, max )
if bin in output :
continue
num_count = num_hist.Integral( num_hist.FindBin( 15), num_hist.FindBin(max ) - 1 )
den_count = den_hist.Integral( den_hist.FindBin( 15), den_hist.FindBin(max ) - 1 )
factor = num_count/den_count
output[bin] = factor
return output
main() | N_FR_TT = N_FR_LT*ff_lead | random_line_split |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
pub struct DecoderBuilder {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
// `bitwidth` encoded `to` value
let value = (symbol << 5) | u16::from(code.width);
// Sets the mapping to all possible indices
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)]
pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self {
EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
}
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() | else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
}
| {
z.extend(x);
break;
} | conditional_block |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
pub struct | {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
// `bitwidth` encoded `to` value
let value = (symbol << 5) | u16::from(code.width);
// Sets the mapping to all possible indices
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)]
pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self {
EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
}
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() {
z.extend(x);
break;
} else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
}
| DecoderBuilder | identifier_name |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
pub struct DecoderBuilder {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
// `bitwidth` encoded `to` value
let value = (symbol << 5) | u16::from(code.width);
// Sets the mapping to all possible indices
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)] | EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
}
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() {
z.extend(x);
break;
} else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
} | pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self { | random_line_split |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
pub struct DecoderBuilder {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
// `bitwidth` encoded `to` value
let value = (symbol << 5) | u16::from(code.width);
// Sets the mapping to all possible indices
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)]
pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self {
EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> |
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() {
z.extend(x);
break;
} else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
}
| {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
} | identifier_body |
views.py | from django.shortcuts import render,redirect,reverse
from django.views.generic import View
#导入只接受GET请求和POST请求的装饰器
from django.views.decorators.http import require_GET,require_POST
#导入form验证用的表单
from .forms import Alterform,EditAlterform,Reviewform
#导入Alter_manage的模型
from Apps.Alter_management.models import Alter_managment,Alter_managment_checked
#导入我们重构的resful文件,用于返回结果代码和消息,详细可以看resful.py文件
from utils import resful
#导入分页用的类
from django.core.paginator import Paginator
#导入时间分类
from datetime import datetime,timedelta
#将时间标记为清醒的时间
from django.utils.timezone import make_aware
#用于模糊查询
from django.db.models import Q
#用于拼接url
from urllib import parse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse,JsonResponse
from .admin import Alter_managment_resources
from Apps.Alterauth.decorators import Alter_login_required
#导入数据库字典和变更类型字典
from Apps.Alter_Dict.models import Alt_Database,Alt_Type
# Create your views here.
def login(request):
return render(request,'Alter_management/login.html')
def index_manage(request):
return render(request,"Alter_management/index.html")
# @require_GET#只接受GET请求
# # class Alter_manager_view(View):#变更管理页面,返回数据
# def Alter_manager_view (request):#变更管理页面,返回数据
# Alterd_datas=Alter_managment.objects.all()
# context={
# 'Alterd_datas':Alterd_datas
# }
# return render(request,"Alter_management/Alter.html",context=context)
# * @函数名: Alter_manager_newview
# * @功能描述: 变更管理页面视图
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 16:57:39
# * @最后编辑者: 郭军
#@staff_member_required(login_url='login')
@method_decorator(Alter_login_required,name='dispatch') | start=request.GET.get('start') #获取时间控件开始时间
end =request.GET.get('end') #获取时间控件结束时间
cxtj =request.GET.get('cxtj') #获取查询条件录入信息
#request.GET.get(参数,默认值)
#这个参数是只有没有传递参数的时候才会使用
#如果传递了,但是是一个空的字符串,也不会使用,那么可以使用 ('ReviewStatus',0) or 0
reviewStatus = int(request.GET.get('ReviewStatus',0)) #获取审核状态查询值,因为get到的都是字符串,转换成整形才能在页面中用数值对比
DatabaseType = int(request.GET.get('DatabaseType',0))
Alterd_datas = Alter_managment.objects.all().order_by('-modifytime')#获取所有数据库的数据
Databases = Alt_Database.objects.all()
AltTypes=Alt_Type.objects.all()
if start or end:#查询时间判断
if start:
start_time=datetime.strptime(start,'%Y/%m/%d')
else:
start_time = datetime(year=2019,month=5,day=1)#如果是空的 就使用默认值
if end:
#end_time = datetime.strptime(end, "%Y/%m/%d")
end_time = datetime.strptime(end, "%Y/%m/%d")+timedelta(hours=23,minutes=59,seconds=59)
else:
end_time=datetime.today()
#Alterd_datas=Alterd_datas.filter(modifytime__range=(make_aware(start_time), make_aware(end_time)))
Alterd_datas=Alterd_datas.filter(modifytime__range=(start_time, end_time))
if cxtj:#查询条件判断
#多条件模糊查询匹配,满足一个即可返回,用到Q对象格式如下
Alterd_datas=Alterd_datas.filter(Q(databaseid=cxtj)|Q(id=cxtj)|Q(altercontent__icontains=cxtj)|Q(altertypeid=cxtj)|Q(modifier__icontains=cxtj)|Q(associatedid__icontains=cxtj))
if DatabaseType:#数据库类型判断
Alterd_datas=Alterd_datas.filter(databaseid=DatabaseType)
if reviewStatus:#审核状态判断
Alterd_datas =Alterd_datas.filter(reviewstatus=reviewStatus)
paginator = Paginator(Alterd_datas, 2) # 分页用,表示每2条数据分一页
if paginator.num_pages < page:
page= paginator.num_pages
page_obj= paginator.page(page)#获取总页数
context_date =self.get_pagination_data(paginator,page_obj)#调用分页函数获取到页码
context = {
'Alterd_datas': page_obj.object_list,
'page_obj':page_obj,#将分了多少页的数据全部传过去
'paginator':page,#当前页数据
'start':start,
'end':end,
'cxtj':cxtj,
'reviewStatus':reviewStatus,
'DatabaseType':DatabaseType,
'Databases':Databases,
'AltTypes':AltTypes,
'url_query': '&'+parse.urlencode({
'start': start or '',
'end':end or '',
'cxtj':cxtj or '',
'reviewStatus':reviewStatus or 0,
'DatabaseType':DatabaseType or 0,
})#用于拼接url,让页面在查询后进行翻页,任然保留查询条件
}#返回包含分页信息的数据
context.update(context_date)#将分页数据更新到context,返回返回给页面
return render(request, "Alter_management/Alter.html", context=context)
#获取和分页功能
def get_pagination_data(self, paginator, page_obj, around_count=2):
current_page = page_obj.number
num_pages = paginator.num_pages
left_has_more = False
right_has_more = False
if current_page <= around_count + 2:
left_pages = range(1, current_page)
else:
left_has_more = True
left_pages = range(current_page - around_count, current_page)
if current_page >= num_pages - around_count - 1:
right_pages = range(current_page + 1, num_pages + 1)
else:
right_has_more = True
right_pages = range(current_page + 1, current_page + around_count + 1)
# current_page为当前页码数,count_page为每页显示数量
#strat = (current_page - 1) * count_page
start_num = (current_page - 1) * around_count
return {
# left_pages:代表的是当前这页的左边的页的页码
'left_pages': left_pages,
# right_pages:代表的是当前这页的右边的页的页码
'right_pages': right_pages,
'current_page': current_page,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'num_pages': num_pages,
'start_num':start_num
}
# * @函数名: edit_Alter_manager
# * @功能描述: 编辑变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:00:18
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
#@method_decorator(permission_required(perm='Alter_management.change_alter_managment',login_url='/'),name="dispatch")
def edit_Alter_manager(request):#变更内容编辑用
if request.user.has_perm('Alter_management.change_alter_managment'):
form =EditAlterform(request.POST)
if form.is_valid():
id=form.cleaned_data.get("id")#变更ID
AltType = form.cleaned_data.get("AltType") # '关联类型'#
AssociatedNumber =form.cleaned_data.get("AssociatedNumber") # '关联编号'#
Database = form.cleaned_data.get("Database") # '数据库'#
AlterContent =form.cleaned_data.get("AlterContent") # 变更内容
if request.user.pk ==Alter_managment.objects.get(id=id).userid:
Alter_managment.objects.filter(id=id).update(altertypeid=AltType, associatedid=AssociatedNumber, databaseid=Database, altercontent=AlterContent, modifier=request.user.username
,modifytime=datetime.now(),reviewstatus='0',userid=request.user.pk)
return resful.OK()
else:
return resful.unauth(message='您不能编辑别人的数据!')
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有编辑的权限!')
# * @函数名: delete_Alter_manager
# * @功能描述: 删除变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:01:02
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
def delete_Alter_manager(request):#变更内容删除用
if request.user.has_perm('Alter_management.change_alter_managment'):
id=request.POST.get("id")
try:
Alter_managment.objects.filter(id=id).delete()
Alter_managment_checked.objects.filter(alterid=id).delete()
return resful.OK()
except:
return resful.params_error(message="该变更不存在")
else:
return resful.unauth(message='您没有删除的权限!')
# * @函数名: add_Alter_managerView
# # * @功能描述: 添加变更内容
# # * @作者: 郭军
# # * @时间: 2019-6-30 15:28:19
# # * @最后编辑时间: 2019-9-3 10:00:36
# # * @最后编辑者: 郭军
class add_Alter_managerView(View):
def get(self,request):
Databases=Alt_Database.objects.all()
context={
'Databases':Databases
}
return render(request,'Alter_management/Alter.html',context=context)
def post(self,request):#添加变更内容
if request.user.has_perm('Alter_management.change_alter_managment'):
form = Alterform(request.POST)
#如果验证成功
if form.is_valid():
AltType_id=form.cleaned_data.get('AltType')
AltTypes = Alt_Type.objects.get(pk=AltType_id)
AssociatedNumber = form.cleaned_data.get('AssociatedNumber')
Database_id = form.cleaned_data.get('Database')
Database= Alt_Database.objects.get(pk=Database_id)
AlterContent=form.cleaned_data.get('AlterContent')
#判断变更内容在库中是否存在
exists=Alter_managment.objects.filter(altercontent=AlterContent).exists()
if not exists:
Alter_managment.objects.create(altertypeid=AltTypes.pk, associatedid=AssociatedNumber, databaseid=Database.pk,altercontent=AlterContent,
modifier=request.user.username,userid=request.user.pk)
return resful.OK()
else:
return resful.params_error(message="该变更内容已经存在!")
else:
error = form.get_error()
print(error)
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有添加变更的权限!')
# * @函数名: Review_Alter_manager
# * @功能描述: 变更审核
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
# @permission_required(perm= 'Alter_management.review_alter_managment',login_url='alter/Alter_manager/')
def Review_Alter_manager(request):#变更审核用
if request.user.has_perm('Alter_management.review_alter_managment'):
form =Reviewform(request.POST)
if form.is_valid():
id = form.cleaned_data.get('id')
ReviewStatus = form.cleaned_data.get('ReviewStatus') # '审核状态',
ReviewContent = form.cleaned_data.get('ReviewContent') # '审核内容',
#更新主表审核状态
Review=Alter_managment.objects.filter(id=id).update(reviewstatus=ReviewStatus, reviewcontent=ReviewContent, reviewer=request.user.username,reviewtime=datetime.now())
#判断主表是否审核成功
if Review:
#取得主表数据
alter_data = Alter_managment.objects.get(id=id)
#获取分表数据
alter_data_checked=Alter_managment_checked.objects.filter(alterid=id)
#判断分表是否有满足条件的数据并且审核状态是未审核
if alter_data_checked and ReviewStatus=='2':
#删除分表的数据
successdelete=alter_data_checked.delete()
if successdelete:
# 如果审核通过则复制创建主表数据到分表
return resful.OK()
else:
return resful.params_error(message='分数据删除失败')
elif alter_data_checked and ReviewStatus=='1':
Alter_managment_checked.objects.update(userid=alter_data.userid,alterid=alter_data.pk, associatedid=alter_data.associatedid,
altercontent=alter_data.altercontent,
modifier=alter_data.modifier,
modifytime=alter_data.modifytime,
reviewer=alter_data.reviewer,
reviewstatus=alter_data.reviewstatus,
reviewcontent=alter_data.reviewcontent,
reviewtime=alter_data.reviewtime,
altertypeid=alter_data.altertypeid,
databaseid=alter_data.databaseid)
return resful.OK()
else:
#如果审核通过则复制创建主表数据到分表
Alter_managment_checked.objects.create(userid=alter_data.userid,alterid=alter_data.pk,associatedid=alter_data.associatedid,altercontent=alter_data.altercontent,modifier=alter_data.modifier,modifytime=alter_data.modifytime,reviewer=alter_data.reviewer,reviewstatus=alter_data.reviewstatus,reviewcontent=alter_data.reviewcontent,reviewtime=alter_data.reviewtime,altertypeid=alter_data.altertypeid,databaseid=alter_data.databaseid)
return resful.OK()
else:
return resful.params_error(message='审核失败!')
return resful.OK()
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有审核的权限!')
# * @函数名: Alter_detail
# * @功能描述: 变更内容详情
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@Alter_login_required
def Alter_detail(request,id):#变更详情页面
Alterdeatil =Alter_managment.objects.get(id=id)
if Alterdeatil:
context = {
'Alterdeatil': Alterdeatil
}
return render(request,"Alter_management/Alter_detail.html",context=context)
else:
return resful.params_error(message='没有找到详情数据')
def test_review(request):
id=request.GET.get('id')
print('获取到的id是:',id)
datas=Alter_managment.objects.values('pk','reviewstatus','reviewcontent').filter(pk=id)
datas =list(datas)
data ={'code':200,'data':datas}
return JsonResponse(data,safe=False) | # @method_decorator(permission_required('Alter_management.change_alter_managment',login_url='/alter/index/'),name="dispatch")
class Alter_manager_newview(View):#变更管理页面,返回数据
def get(self,request):
#request.GET.get获取出来的数据都是字符串类型
page = int(request.GET.get('p',1))#获当前页数,并转换成整形,没有传默认为1 | random_line_split |
views.py | from django.shortcuts import render,redirect,reverse
from django.views.generic import View
#导入只接受GET请求和POST请求的装饰器
from django.views.decorators.http import require_GET,require_POST
#导入form验证用的表单
from .forms import Alterform,EditAlterform,Reviewform
#导入Alter_manage的模型
from Apps.Alter_management.models import Alter_managment,Alter_managment_checked
#导入我们重构的resful文件,用于返回结果代码和消息,详细可以看resful.py文件
from utils import resful
#导入分页用的类
from django.core.paginator import Paginator
#导入时间分类
from datetime import datetime,timedelta
#将时间标记为清醒的时间
from django.utils.timezone import make_aware
#用于模糊查询
from django.db.models import Q
#用于拼接url
from urllib import parse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse,JsonResponse
from .admin import Alter_managment_resources
from Apps.Alterauth.decorators import Alter_login_required
#导入数据库字典和变更类型字典
from Apps.Alter_Dict.models import Alt_Database,Alt_Type
# Create your views here.
def login(request):
return render(request,'Alter_management/login.html')
def index_manage(request):
return render(request,"Alter_management/index.html")
# @require_GET#只接受GET请求
# # class Alter_manager_v | iew):#变更管理页面,返回数据
# def Alter_manager_view (request):#变更管理页面,返回数据
# Alterd_datas=Alter_managment.objects.all()
# context={
# 'Alterd_datas':Alterd_datas
# }
# return render(request,"Alter_management/Alter.html",context=context)
# * @函数名: Alter_manager_newview
# * @功能描述: 变更管理页面视图
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 16:57:39
# * @最后编辑者: 郭军
#@staff_member_required(login_url='login')
@method_decorator(Alter_login_required,name='dispatch')
# @method_decorator(permission_required('Alter_management.change_alter_managment',login_url='/alter/index/'),name="dispatch")
class Alter_manager_newview(View):#变更管理页面,返回数据
def get(self,request):
#request.GET.get获取出来的数据都是字符串类型
page = int(request.GET.get('p',1))#获当前页数,并转换成整形,没有传默认为1
start=request.GET.get('start') #获取时间控件开始时间
end =request.GET.get('end') #获取时间控件结束时间
cxtj =request.GET.get('cxtj') #获取查询条件录入信息
#request.GET.get(参数,默认值)
#这个参数是只有没有传递参数的时候才会使用
#如果传递了,但是是一个空的字符串,也不会使用,那么可以使用 ('ReviewStatus',0) or 0
reviewStatus = int(request.GET.get('ReviewStatus',0)) #获取审核状态查询值,因为get到的都是字符串,转换成整形才能在页面中用数值对比
DatabaseType = int(request.GET.get('DatabaseType',0))
Alterd_datas = Alter_managment.objects.all().order_by('-modifytime')#获取所有数据库的数据
Databases = Alt_Database.objects.all()
AltTypes=Alt_Type.objects.all()
if start or end:#查询时间判断
if start:
start_time=datetime.strptime(start,'%Y/%m/%d')
else:
start_time = datetime(year=2019,month=5,day=1)#如果是空的 就使用默认值
if end:
#end_time = datetime.strptime(end, "%Y/%m/%d")
end_time = datetime.strptime(end, "%Y/%m/%d")+timedelta(hours=23,minutes=59,seconds=59)
else:
end_time=datetime.today()
#Alterd_datas=Alterd_datas.filter(modifytime__range=(make_aware(start_time), make_aware(end_time)))
Alterd_datas=Alterd_datas.filter(modifytime__range=(start_time, end_time))
if cxtj:#查询条件判断
#多条件模糊查询匹配,满足一个即可返回,用到Q对象格式如下
Alterd_datas=Alterd_datas.filter(Q(databaseid=cxtj)|Q(id=cxtj)|Q(altercontent__icontains=cxtj)|Q(altertypeid=cxtj)|Q(modifier__icontains=cxtj)|Q(associatedid__icontains=cxtj))
if DatabaseType:#数据库类型判断
Alterd_datas=Alterd_datas.filter(databaseid=DatabaseType)
if reviewStatus:#审核状态判断
Alterd_datas =Alterd_datas.filter(reviewstatus=reviewStatus)
paginator = Paginator(Alterd_datas, 2) # 分页用,表示每2条数据分一页
if paginator.num_pages < page:
page= paginator.num_pages
page_obj= paginator.page(page)#获取总页数
context_date =self.get_pagination_data(paginator,page_obj)#调用分页函数获取到页码
context = {
'Alterd_datas': page_obj.object_list,
'page_obj':page_obj,#将分了多少页的数据全部传过去
'paginator':page,#当前页数据
'start':start,
'end':end,
'cxtj':cxtj,
'reviewStatus':reviewStatus,
'DatabaseType':DatabaseType,
'Databases':Databases,
'AltTypes':AltTypes,
'url_query': '&'+parse.urlencode({
'start': start or '',
'end':end or '',
'cxtj':cxtj or '',
'reviewStatus':reviewStatus or 0,
'DatabaseType':DatabaseType or 0,
})#用于拼接url,让页面在查询后进行翻页,任然保留查询条件
}#返回包含分页信息的数据
context.update(context_date)#将分页数据更新到context,返回返回给页面
return render(request, "Alter_management/Alter.html", context=context)
#获取和分页功能
def get_pagination_data(self, paginator, page_obj, around_count=2):
current_page = page_obj.number
num_pages = paginator.num_pages
left_has_more = False
right_has_more = False
if current_page <= around_count + 2:
left_pages = range(1, current_page)
else:
left_has_more = True
left_pages = range(current_page - around_count, current_page)
if current_page >= num_pages - around_count - 1:
right_pages = range(current_page + 1, num_pages + 1)
else:
right_has_more = True
right_pages = range(current_page + 1, current_page + around_count + 1)
# current_page为当前页码数,count_page为每页显示数量
#strat = (current_page - 1) * count_page
start_num = (current_page - 1) * around_count
return {
# left_pages:代表的是当前这页的左边的页的页码
'left_pages': left_pages,
# right_pages:代表的是当前这页的右边的页的页码
'right_pages': right_pages,
'current_page': current_page,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'num_pages': num_pages,
'start_num':start_num
}
# * @函数名: edit_Alter_manager
# * @功能描述: 编辑变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:00:18
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
#@method_decorator(permission_required(perm='Alter_management.change_alter_managment',login_url='/'),name="dispatch")
def edit_Alter_manager(request):#变更内容编辑用
if request.user.has_perm('Alter_management.change_alter_managment'):
form =EditAlterform(request.POST)
if form.is_valid():
id=form.cleaned_data.get("id")#变更ID
AltType = form.cleaned_data.get("AltType") # '关联类型'#
AssociatedNumber =form.cleaned_data.get("AssociatedNumber") # '关联编号'#
Database = form.cleaned_data.get("Database") # '数据库'#
AlterContent =form.cleaned_data.get("AlterContent") # 变更内容
if request.user.pk ==Alter_managment.objects.get(id=id).userid:
Alter_managment.objects.filter(id=id).update(altertypeid=AltType, associatedid=AssociatedNumber, databaseid=Database, altercontent=AlterContent, modifier=request.user.username
,modifytime=datetime.now(),reviewstatus='0',userid=request.user.pk)
return resful.OK()
else:
return resful.unauth(message='您不能编辑别人的数据!')
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有编辑的权限!')
# * @函数名: delete_Alter_manager
# * @功能描述: 删除变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:01:02
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
def delete_Alter_manager(request):#变更内容删除用
if request.user.has_perm('Alter_management.change_alter_managment'):
id=request.POST.get("id")
try:
Alter_managment.objects.filter(id=id).delete()
Alter_managment_checked.objects.filter(alterid=id).delete()
return resful.OK()
except:
return resful.params_error(message="该变更不存在")
else:
return resful.unauth(message='您没有删除的权限!')
# * @函数名: add_Alter_managerView
# # * @功能描述: 添加变更内容
# # * @作者: 郭军
# # * @时间: 2019-6-30 15:28:19
# # * @最后编辑时间: 2019-9-3 10:00:36
# # * @最后编辑者: 郭军
class add_Alter_managerView(View):
def get(self,request):
Databases=Alt_Database.objects.all()
context={
'Databases':Databases
}
return render(request,'Alter_management/Alter.html',context=context)
def post(self,request):#添加变更内容
if request.user.has_perm('Alter_management.change_alter_managment'):
form = Alterform(request.POST)
#如果验证成功
if form.is_valid():
AltType_id=form.cleaned_data.get('AltType')
AltTypes = Alt_Type.objects.get(pk=AltType_id)
AssociatedNumber = form.cleaned_data.get('AssociatedNumber')
Database_id = form.cleaned_data.get('Database')
Database= Alt_Database.objects.get(pk=Database_id)
AlterContent=form.cleaned_data.get('AlterContent')
#判断变更内容在库中是否存在
exists=Alter_managment.objects.filter(altercontent=AlterContent).exists()
if not exists:
Alter_managment.objects.create(altertypeid=AltTypes.pk, associatedid=AssociatedNumber, databaseid=Database.pk,altercontent=AlterContent,
modifier=request.user.username,userid=request.user.pk)
return resful.OK()
else:
return resful.params_error(message="该变更内容已经存在!")
else:
error = form.get_error()
print(error)
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有添加变更的权限!')
# * @函数名: Review_Alter_manager
# * @功能描述: 变更审核
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
# @permission_required(perm= 'Alter_management.review_alter_managment',login_url='alter/Alter_manager/')
def Review_Alter_manager(request):#变更审核用
if request.user.has_perm('Alter_management.review_alter_managment'):
form =Reviewform(request.POST)
if form.is_valid():
id = form.cleaned_data.get('id')
ReviewStatus = form.cleaned_data.get('ReviewStatus') # '审核状态',
ReviewContent = form.cleaned_data.get('ReviewContent') # '审核内容',
#更新主表审核状态
Review=Alter_managment.objects.filter(id=id).update(reviewstatus=ReviewStatus, reviewcontent=ReviewContent, reviewer=request.user.username,reviewtime=datetime.now())
#判断主表是否审核成功
if Review:
#取得主表数据
alter_data = Alter_managment.objects.get(id=id)
#获取分表数据
alter_data_checked=Alter_managment_checked.objects.filter(alterid=id)
#判断分表是否有满足条件的数据并且审核状态是未审核
if alter_data_checked and ReviewStatus=='2':
#删除分表的数据
successdelete=alter_data_checked.delete()
if successdelete:
# 如果审核通过则复制创建主表数据到分表
return resful.OK()
else:
return resful.params_error(message='分数据删除失败')
elif alter_data_checked and ReviewStatus=='1':
Alter_managment_checked.objects.update(userid=alter_data.userid,alterid=alter_data.pk, associatedid=alter_data.associatedid,
altercontent=alter_data.altercontent,
modifier=alter_data.modifier,
modifytime=alter_data.modifytime,
reviewer=alter_data.reviewer,
reviewstatus=alter_data.reviewstatus,
reviewcontent=alter_data.reviewcontent,
reviewtime=alter_data.reviewtime,
altertypeid=alter_data.altertypeid,
databaseid=alter_data.databaseid)
return resful.OK()
else:
#如果审核通过则复制创建主表数据到分表
Alter_managment_checked.objects.create(userid=alter_data.userid,alterid=alter_data.pk,associatedid=alter_data.associatedid,altercontent=alter_data.altercontent,modifier=alter_data.modifier,modifytime=alter_data.modifytime,reviewer=alter_data.reviewer,reviewstatus=alter_data.reviewstatus,reviewcontent=alter_data.reviewcontent,reviewtime=alter_data.reviewtime,altertypeid=alter_data.altertypeid,databaseid=alter_data.databaseid)
return resful.OK()
else:
return resful.params_error(message='审核失败!')
return resful.OK()
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有审核的权限!')
# * @函数名: Alter_detail
# * @功能描述: 变更内容详情
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@Alter_login_required
def Alter_detail(request,id):#变更详情页面
Alterdeatil =Alter_managment.objects.get(id=id)
if Alterdeatil:
context = {
'Alterdeatil': Alterdeatil
}
return render(request,"Alter_management/Alter_detail.html",context=context)
else:
return resful.params_error(message='没有找到详情数据')
def test_review(request):
id=request.GET.get('id')
print('获取到的id是:',id)
datas=Alter_managment.objects.values('pk','reviewstatus','reviewcontent').filter(pk=id)
datas =list(datas)
data ={'code':200,'data':datas}
return JsonResponse(data,safe=False)
| iew(V | identifier_name |
views.py | from django.shortcuts import render,redirect,reverse
from django.views.generic import View
#导入只接受GET请求和POST请求的装饰器
from django.views.decorators.http import require_GET,require_POST
#导入form验证用的表单
from .forms import Alterform,EditAlterform,Reviewform
#导入Alter_manage的模型
from Apps.Alter_management.models import Alter_managment,Alter_managment_checked
#导入我们重构的resful文件,用于返回结果代码和消息,详细可以看resful.py文件
from utils import resful
#导入分页用的类
from django.core.paginator import Paginator
#导入时间分类
from datetime import datetime,timedelta
#将时间标记为清醒的时间
from django.utils.timezone import make_aware
#用于模糊查询
from django.db.models import Q
#用于拼接url
from urllib import parse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse,JsonResponse
from .admin import Alter_managment_resources
from Apps.Alterauth.decorators import Alter_login_required
#导入数据库字典和变更类型字典
from Apps.Alter_Dict.models import Alt_Database,Alt_Type
# Create your views here.
def login(request):
return render(request,'Alter_management/login.html')
def index_manage(request):
return render(request,"Alter_management/index.html")
# @require_GET#只接受GET请求
# # class Alter_manager_view(View):#变更管理页面,返回数据
# def Alter_manager_view (request):#变更管理页面,返回数据
# Alterd_datas=Alter_managment.objects.all()
# context={
# 'Alterd_datas':Alterd_datas
# }
# return render(request,"Alter_management/Alter.html",context=context)
# * @函数名: Alter_manager_newview
# * @功能描述: 变更管理页面视图
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 16:57:39
# * @最后编辑者: 郭军
#@staff_member_required(login_url='login')
@method_decorator(Alter_login_required,name='dispatch')
# @method_decorator(permission_required('Alter_management.change_alter_managment',login_url='/alter/index/'),name="dispatch")
class Alter_manager_newview(View):#变更管理页面,返回数据
def get(self,request):
#request.GET.get获取出来的数据都是字符串类型
page = int(request.GET.get('p',1))#获当前页数,并转换成整形,没有传默认为1
start=request.GET.get('start') #获取时间控件开始时间
end =request.GET.get('end') #获取时间控件结束时间
cxtj =request.GET.get('cxtj') #获取查询条件录入信息
#request.GET.get(参数,默认值)
#这个参数是只有没有传递参数的时候才会使用
#如果传递了,但是是一个空的字符串,也不会使用,那么可以使用 ('ReviewStatus',0) or 0
reviewStatus = int(request.GET.get('ReviewStatus',0)) #获取审核状态查询值,因为get到的都是字符串,转换成整形才能在页面中用数值对比
DatabaseType = int(request.GET.get('DatabaseType',0))
Alterd_datas = Alter_managment.objects.all().order_by('-modifytime')#获取所有数据库的数据
Databases = Alt_Database.objects.all()
AltTypes=Alt_Type.objects.all()
if start or end:#查询时间判断
if start:
start_time=datetime.strptime(start,'%Y/%m/%d')
else:
start_time = datetime(year=2019,month=5,day=1)#如果是空的 就使用默认值
if end:
#end_time = datetime.strptime(end, "%Y/%m/%d")
end_time = datetime.strptime(end, "%Y/%m/%d")+timedelta(hours=23,minutes=59,seconds=59)
else:
end_time=datetime.today()
#Alterd_datas=Alterd_datas.filter(modifytime__range=(make_aware(start_time), make_aware(end_time)))
Alterd_datas=Alterd_datas.filter(modifytime__range=(start_time, end_time))
if cxtj:#查询条件判断
#多条件模糊查询匹配,满足一个即可返回,用到Q对象格式如下
Alterd_datas=Alterd_datas.filter(Q(databaseid=cxtj)|Q(id=cxtj)|Q(altercontent__icontains=cxtj)|Q(altertypeid=cxtj)|Q(modifier__icontains=cxtj)|Q(associatedid__icontains=cxtj))
if DatabaseType:#数据库类型判断
Alterd_datas=Alterd_datas.filter(databaseid=DatabaseType)
if reviewStatus:#审核状态判断
Alterd_datas =Alterd_datas.filter(reviewstatus=reviewStatus)
paginator = Paginator(Alterd_datas, 2) # 分页用,表示每2条数据分一页
if paginator.num_pages < page:
page= paginator.num_pages
page_obj= paginator.page(page)#获取总页数
context_date =self.get_pagination_data(paginator,page_obj)#调用分页函数获取到页码
context = {
'Alterd_datas': page_obj.object_list,
'page_obj':page_obj,#将分了多少页的数据全部传过去
'paginator':page,#当前页数据
'start':start,
'end':end,
'cxtj':cxtj,
'reviewStatus':reviewStatus,
'DatabaseType':DatabaseType,
'Databases':Databases,
'AltTypes':AltTypes,
'url_query': '&'+parse.urlencode({
'start': start or '',
'end':end or '',
'cxtj':cxtj or '',
'reviewStatus':reviewStatus or 0,
'DatabaseType':DatabaseType or 0,
})#用于拼接url,让页面在查询后进行翻页,任然保留查询条件
}#返回包含分页信息的数据
context.update(context_date)#将分页数据更新到context,返回返回给页面
return render(request, "Alter_management/Alter.html", context=context)
#获取和分页功能
def get_pagination_data(self, paginator, page_obj, around_count=2):
current_page = page_obj.number
num_pages = paginator.num_pages
left_has_more = False
right_has_more = False
if current_page <= around_count + 2:
left_pages = range(1, current_page)
else:
left_has_more = True
left_pages = range(current_page - around_count, current_page)
if current_page >= num_pages - around_count - 1:
right_pages = range(current_page + 1, num_pages + 1)
else:
right_has_more = True
right_pages = range(current_page + 1, current_page + around_count + 1)
# current_page为当前页码数,count_page为每页显示数量
#strat = (current_page - 1) * count_page
start_num = (current_page - 1) * around_count
return {
# left_pages:代表的是当前这页的左边的页的页码
'left_pages': left_pages,
# right_pages:代表的是当前这页的右边的页的页码
'right_pages': right_pages,
'current_page': current_page,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'num_pages': num_pages,
'start_num':start_num
}
# * @函数名: edit_Alter_manager
# * @功能描述: 编辑变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:00:18
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
#@method_decorator(permission_required(perm='Alter_management.change_alter_managment',login_url='/'),name="dispatch")
def edit_Alter_manager(request):#变更内容编辑用
if request.user.has_perm('Alter_management.change_alter_managment'):
form =EditAlterform(request.POST)
if form.is_valid():
id=form.cleaned_data.get("id")#变更ID
AltType = form.cleaned_data.get("AltType") # '关联类型'#
AssociatedNumber =form.cleaned_data.get("AssociatedNumber") # '关联编号'#
Database = form.cleaned_data.get("Database") # '数据库'#
AlterContent =form.cleaned_data.get("AlterContent") # 变更内容
if request.user.pk ==Alter_managment.objects.get(id=id).userid:
Alter_managment.objects.filter(id=id).update(altertypeid=AltType, associatedid=AssociatedNumber, databaseid=Database, altercontent=AlterContent, modifier=request.user.username
,modifytime=datetime.now(),reviewstatus='0',userid=request.user.pk)
return resful.OK()
else:
return resful.unauth(message='您不能编辑别人的数据!')
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有编辑的权限!')
# * @函数名: delete_Alter_manager
# * @功能描述: 删除变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:01:02
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
def delete_Alter_manager(request):#变更内容删除用
if request.user.has_perm('Alter_management.change_alter_managment'):
id=request.POST.get("id")
try:
Alter_managment.objects.filter(id=id).delete()
Alter_managment_checked.objects.filter(alterid=id).delete()
return resful.OK()
except:
return resful.params_error(message="该变更不存在")
else:
return resful.unauth(message='您没有删除的权限!')
# * @函数名: add_Alter_managerView
# # * @功能描述: 添加变更内容
# # * @作者: 郭军
# # * @时间: 2019-6-30 15:28:19
# # * @最后编辑时间: 2019-9-3 10:00:36
# # * @最后编辑者: 郭军
class add_Alter_managerView(View):
def get(self,request):
Databases=Alt_Database.objects.all()
context={
'Databases':Databases
}
return render(request,'Alter_management/Alter.html',context=context)
def post(self,request):#添加变更内容
if request.user.has_perm('Alter_management.change_alter_managment'):
form = Alterform(request.POST)
#如果验证成功
if form.is_valid():
AltType_id=form.cleaned_data.get('AltType')
AltTypes = Alt_Type.objects.get(pk=AltType_id)
AssociatedNumber = form.cleaned_data.get('AssociatedNumber')
Database_id = form.cleaned_data.get('Database')
Database= Alt_Database.objects.get(pk=Database_id)
AlterContent=form.cleaned_data.get('AlterContent')
#判断变更内容在库中是否存在
exists=Alter_managment.objects.filter(altercontent=AlterContent).exists()
if not exists:
Alter_managment.objects.create(altertypeid=AltTypes.pk, associatedid=AssociatedNumber, databaseid=Database.pk,altercontent=AlterContent,
modifier=request.user.username,userid=request.user.pk)
return resful.OK()
else:
return resful.params_error(message="该变更内容已经存在!")
else:
error = form.get_error()
print(error)
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有添加变更的权限!')
# * @函数名: Review_Alter_manager
# * @功能描述: 变更审核
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
# @permission_required(perm= 'Alter_management.review_alter_managment',login_url='alter/Alter_manager/')
def Review_Alter_manager(request):#变更审核用
if request.user.has_perm('Alter_management.review_alter_managment'):
form =Reviewform(request.POST)
if form.is_valid():
id = form.cleaned_data.get('id')
ReviewStatus = form.cleaned_data.get('ReviewStatus') # '审核状态',
ReviewContent = form.cleaned_data.get('ReviewContent') # '审核内容',
#更新主表审核状态
Review=Alter_managment.objects.filter(id=id).update(reviewstatus=ReviewStatus, reviewcontent=ReviewContent, reviewer=request.user.username,reviewtime=datetime.now())
#判断主表是否审核成功
if Review:
#取得主表数据
alter_data = Alter_managment.objects.get(id=id)
#获取分表数据
alter_data_checked=Alter_managment_checked.objects.filter(alterid=id)
#判断分表是否有满足条件的数据并且审核状态是未审核
if alter_data_checked and ReviewStatus=='2':
#删除分表的数据
successdelete=alter_data_checked.delete()
if successdelete:
# 如果审核通过则复制创建主表数据到分表
return resful.OK()
else:
return resful.params_error(message='分数据删除失败')
elif alter_data_checked and ReviewStatus=='1':
Alter_managment_checked.objects.update(userid=alter_data.userid,alterid=alter_data.pk, associatedid=alter_data.associatedid,
altercontent=alter_data.altercontent,
modifier=alter_data.modifier,
modifytime=alter_data.modifytime,
reviewer=alter_data.reviewer,
reviewstatus=alter_data.reviewstatus,
reviewcontent=alter_data.reviewcontent,
reviewtime=alter_data.reviewtime,
altertypeid=alter_data.altertypeid,
databaseid=alter_data.databaseid)
return resful.OK()
else:
#如果审核通过则复制创建主表数据到分表
Alter_managment_checked.objects.create(userid=alter_data.userid,alterid=alter_data.pk,associatedid=alter_data.associatedid,altercontent=alter_data.altercontent,modifier=alter_data.modifier,modifytime=alter_data.modifytime,reviewer=alter_data.reviewer,reviewstatus=alter_data.reviewstatus,reviewcontent=alter_data.reviewcontent,reviewtime=alter_data.reviewtime,altertypeid=alter_data.altertypeid,databaseid=alter_data.databaseid)
return resful.OK()
else:
return resful.params_error(message='审核失败!')
return resful.OK()
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有审核的权限!')
# * @函数名: Alter_detail
# * @功能描述: 变更内容详情
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@Alter_login_required
def Alter_detail(request,id):#变更详情页面
Alterdeatil =Alter_managment.objects.get(id=id)
if Alterdeatil:
context = {
'Alterdeatil': Alterdeatil
}
return render(request,"Alter_management/Alter_detail.html",context=context)
else:
return resful.params_error(message='没有找到详情数据')
def test_review(request):
id=request.GET.get('id')
print('获取到的id是:',id)
datas=Alter_managment.objects.values('pk','reviewstatus','reviewcontent').filter(pk=id)
datas =list(datas)
data ={'code':200,'data':datas}
return JsonResponse(data,safe=False)
| conditional_block | ||
views.py | from django.shortcuts import render,redirect,reverse
from django.views.generic import View
#导入只接受GET请求和POST请求的装饰器
from django.views.decorators.http import require_GET,require_POST
#导入form验证用的表单
from .forms import Alterform,EditAlterform,Reviewform
#导入Alter_manage的模型
from Apps.Alter_management.models import Alter_managment,Alter_managment_checked
#导入我们重构的resful文件,用于返回结果代码和消息,详细可以看resful.py文件
from utils import resful
#导入分页用的类
from django.core.paginator import Paginator
#导入时间分类
from datetime import datetime,timedelta
#将时间标记为清醒的时间
from django.utils.timezone import make_aware
#用于模糊查询
from django.db.models import Q
#用于拼接url
from urllib import parse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse,JsonResponse
from .admin import Alter_managment_resources
from Apps.Alterauth.decorators import Alter_login_required
#导入数据库字典和变更类型字典
from Apps.Alter_Dict.models import Alt_Database,Alt_Type
# Create your views here.
def login(request):
return render(request,'Alter_management/login.html')
def index_manage(request):
return render(request,"Alter_management/index.html")
# @require_GET#只接受GET请求
# # class Alter_manager_view(View):#变更管理页面,返回数据
# def Alter_manager_view (request):#变更管理页面,返回数据
# Alterd_datas=Alter_managment.objects.all()
# context={
# 'Alterd_datas':Alterd_datas
# }
# return render(request,"Alter_management/Alter.html",context=context)
# * @函数名: Alter_manager_newview
# * @功能描述: 变更管理页面视图
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 16:57:39
# * @最后编辑者: 郭军
#@staff_member_required(login_url='login')
@method_decorator(Alter_login_required,name='dispatch')
# @method_decorator(permission_required('Alter_management.change_alter_managment',login_url='/alter/index/'),name="dispatch")
class Alter_manager_newview(View):#变更管理页面,返回数据
def get(self,request):
#request.GET.get获取出来的数据都是字符串类型
page = int(request.GET.get('p',1))#获当前页数,并转换成整形,没有传默认为1
start=request.GET.get('start') #获取时间控件开始时间
end =request.GET.get('end') #获取时间控件结束时间
cxtj =request.GET.get('cxtj') #获取查询条件录入信息
#request.GET.get(参数,默认值)
#这个参数是只有没有传递参数的时候才会使用
#如果传递了,但是是一个空的字符串,也不会使用,那么可以使用 ('ReviewStatus',0) or 0
reviewStatus = int(request.GET.get('ReviewStatus',0)) #获取审核状态查询值,因为get到的都是字符串,转换成整形才能在页面中用数值对比
DatabaseType = int(request.GET.get('DatabaseType',0))
Alterd_datas = Alter_managment.objects.all().order_by('-modifytime')#获取所有数据库的数据
Databases = Alt_Database.objects.all()
AltTypes=Alt_Type.objects.all()
if start or end:#查询时间判断
if start:
start_time=datetime.strptime(start,'%Y/%m/%d')
else:
start_time = datetime(year=2019,month=5,day=1)#如果是空的 就使用默认值
if end:
#end_time = datetime.strptime(end, "%Y/%m/%d")
end_time = datetime.strptime(end, "%Y/%m/%d")+timedelta(hours=23,minutes=59,seconds=59)
else:
end_time=datetime.today()
#Alterd_datas=Alterd_datas.filter(modifytime__range=(make_aware(start_time), make_aware(end_time)))
Alterd_datas=Alterd_datas.filter(modifytime__range=(start_time, end_time))
if cxtj:#查询条件判断
#多条件模糊查询匹配,满足一个即可返回,用到Q对象格式如下
Alterd_datas=Alterd_datas.filter(Q(databaseid=cxtj)|Q(id=cxtj)|Q(altercontent__icontains=cxtj)|Q(altertypeid=cxtj)|Q(modifier__icontains=cxtj)|Q(associatedid__icontains=cxtj))
if DatabaseType:#数据库类型判断
Alterd_datas=Alterd_datas.filter(databaseid=DatabaseType)
if reviewStatus:#审核状态判断
Alterd_datas =Alterd_datas.filter(reviewstatus=reviewStatus)
paginator = Paginator(Alterd_datas, 2) # 分页用,表示每2条数据分一页
if paginator.num_pages < page:
page= paginator.num_pages
page_obj= paginator.page(page)#获取总页数
context_date =self.get_pagination_data(paginator,page_obj)#调用分页函数获取到页码
context = {
'Alterd_datas': page_obj.object_list,
'page_obj':page_obj,#将分了多少页的数据全部传过去
'paginator':page,#当前页数据
'start':start,
'end':end,
'cxtj':cxtj,
'reviewStatus':reviewStatus,
'DatabaseType':DatabaseType,
'Databases':Databases,
'AltTypes':AltTypes,
'url_query': '&'+parse.urlencode({
'start': start or '',
'end':end or '',
'cxtj':cxtj or '',
'reviewStatus':reviewStatus or 0,
'DatabaseType':DatabaseType or 0,
})#用于拼接url,让页面在查询后进行翻页,任然保留查询条件
}#返回包含分页信息的数据
context.update(context_date)#将分页数据更新到context,返回返回给页面
return render(request, "Alter_management/Alter.html", context=context)
#获取和分页功能
def get_pagination_data(self, paginator, page_obj, around_count=2):
current_page = page_obj.number
num_pages = paginator.num_pages
left_has_more = False
right_has_more = False
if current_page <= around_count + 2:
left_pages = range(1, current_page)
else:
left_has_more = True
left_pages = range(current_page - around_count, current_page)
if current_page >= num_pages - around_count - 1:
right_pages = range(current_page + 1, num_pages + 1)
else:
right_has_more = True
right_pages = range(current_page + 1, current_page + around_count + 1)
# current_page为当前页码数,count_page为每页显示数量
#strat = (current_page - 1) * count_page
start_num = (current_page - 1) * around_count
return {
# left_pages:代表的是当前这页的左边的页的页码
'left_pages': left_pages,
# right_pages:代表的是当前这页的右边的页的页码
'right_pages': right_pages,
'current_page': current_page,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'num_pages': num_pages,
'start_num':start_num
}
# * @函数名: edit_Alter_manager
# * @功能描述: 编辑变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:00:18
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
#@method_decorator(permission_required(perm='Alter_management.change_alter_managment',login_url='/'),name="dispatch")
def edit_Alter_manager(request):#变更内容编辑用
if request.user.has_perm('Alter_management.change_alter_managment'):
form =EditAlterform(request.POST)
if form.is_valid():
id=form.cleaned_data.get("id")#变更ID
AltType = form.cleaned_data.get("AltType") # '关联类型'#
AssociatedNumber =form.cleaned_data.get("AssociatedNumber") # '关联编号'#
Database = form.cleaned_data.get("Database") # '数据库'#
AlterContent =form.cleaned_data.get("AlterContent") # 变更内容
if request.user.pk ==Alter_managment.objects.get(id=id).userid:
Alter_managment.objects.filter(id=id).update(altertypeid=AltType, associatedid=AssociatedNumber, databaseid=Database, altercontent=AlterContent, modifier=request.user.username
,modifytime=datetime.now(),reviewstatus='0',userid=request.user.pk)
return resful.OK()
else:
return resful.unauth(message='您不能编辑别人的数据!')
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有编辑的权限!')
# * @函数名: delete_Alter_manager
# * @功能描述: 删除变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:0 | AltType_id=form.cleaned_data.get('AltType')
AltTypes = Alt_Type.objects.get(pk=AltType_id)
AssociatedNumber = form.cleaned_data.get('AssociatedNumber')
Database_id = form.cleaned_data.get('Database')
Database= Alt_Database.objects.get(pk=Database_id)
AlterContent=form.cleaned_data.get('AlterContent')
#判断变更内容在库中是否存在
exists=Alter_managment.objects.filter(altercontent=AlterContent).exists()
if not exists:
Alter_managment.objects.create(altertypeid=AltTypes.pk, associatedid=AssociatedNumber, databaseid=Database.pk,altercontent=AlterContent,
modifier=request.user.username,userid=request.user.pk)
return resful.OK()
else:
return resful.params_error(message="该变更内容已经存在!")
else:
error = form.get_error()
print(error)
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有添加变更的权限!')
# * @函数名: Review_Alter_manager
# * @功能描述: 变更审核
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
# @permission_required(perm= 'Alter_management.review_alter_managment',login_url='alter/Alter_manager/')
def Review_Alter_manager(request):#变更审核用
if request.user.has_perm('Alter_management.review_alter_managment'):
form =Reviewform(request.POST)
if form.is_valid():
id = form.cleaned_data.get('id')
ReviewStatus = form.cleaned_data.get('ReviewStatus') # '审核状态',
ReviewContent = form.cleaned_data.get('ReviewContent') # '审核内容',
#更新主表审核状态
Review=Alter_managment.objects.filter(id=id).update(reviewstatus=ReviewStatus, reviewcontent=ReviewContent, reviewer=request.user.username,reviewtime=datetime.now())
#判断主表是否审核成功
if Review:
#取得主表数据
alter_data = Alter_managment.objects.get(id=id)
#获取分表数据
alter_data_checked=Alter_managment_checked.objects.filter(alterid=id)
#判断分表是否有满足条件的数据并且审核状态是未审核
if alter_data_checked and ReviewStatus=='2':
#删除分表的数据
successdelete=alter_data_checked.delete()
if successdelete:
# 如果审核通过则复制创建主表数据到分表
return resful.OK()
else:
return resful.params_error(message='分数据删除失败')
elif alter_data_checked and ReviewStatus=='1':
Alter_managment_checked.objects.update(userid=alter_data.userid,alterid=alter_data.pk, associatedid=alter_data.associatedid,
altercontent=alter_data.altercontent,
modifier=alter_data.modifier,
modifytime=alter_data.modifytime,
reviewer=alter_data.reviewer,
reviewstatus=alter_data.reviewstatus,
reviewcontent=alter_data.reviewcontent,
reviewtime=alter_data.reviewtime,
altertypeid=alter_data.altertypeid,
databaseid=alter_data.databaseid)
return resful.OK()
else:
#如果审核通过则复制创建主表数据到分表
Alter_managment_checked.objects.create(userid=alter_data.userid,alterid=alter_data.pk,associatedid=alter_data.associatedid,altercontent=alter_data.altercontent,modifier=alter_data.modifier,modifytime=alter_data.modifytime,reviewer=alter_data.reviewer,reviewstatus=alter_data.reviewstatus,reviewcontent=alter_data.reviewcontent,reviewtime=alter_data.reviewtime,altertypeid=alter_data.altertypeid,databaseid=alter_data.databaseid)
return resful.OK()
else:
return resful.params_error(message='审核失败!')
return resful.OK()
else:
return resful.params_error(message=form.get_error())
else:
return resful.unauth(message='您没有审核的权限!')
# * @函数名: Alter_detail
# * @功能描述: 变更内容详情
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@Alter_login_required
def Alter_detail(request,id):#变更详情页面
Alterdeatil =Alter_managment.objects.get(id=id)
if Alterdeatil:
context = {
'Alterdeatil': Alterdeatil
}
return render(request,"Alter_management/Alter_detail.html",context=context)
else:
return resful.params_error(message='没有找到详情数据')
def test_review(request):
id=request.GET.get('id')
print('获取到的id是:',id)
datas=Alter_managment.objects.values('pk','reviewstatus','reviewcontent').filter(pk=id)
datas =list(datas)
data ={'code':200,'data':datas}
return JsonResponse(data,safe=False)
| 1:02
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
def delete_Alter_manager(request):#变更内容删除用
if request.user.has_perm('Alter_management.change_alter_managment'):
id=request.POST.get("id")
try:
Alter_managment.objects.filter(id=id).delete()
Alter_managment_checked.objects.filter(alterid=id).delete()
return resful.OK()
except:
return resful.params_error(message="该变更不存在")
else:
return resful.unauth(message='您没有删除的权限!')
# * @函数名: add_Alter_managerView
# # * @功能描述: 添加变更内容
# # * @作者: 郭军
# # * @时间: 2019-6-30 15:28:19
# # * @最后编辑时间: 2019-9-3 10:00:36
# # * @最后编辑者: 郭军
class add_Alter_managerView(View):
def get(self,request):
Databases=Alt_Database.objects.all()
context={
'Databases':Databases
}
return render(request,'Alter_management/Alter.html',context=context)
def post(self,request):#添加变更内容
if request.user.has_perm('Alter_management.change_alter_managment'):
form = Alterform(request.POST)
#如果验证成功
if form.is_valid():
| identifier_body |
simpletests.py | from pymarketo.client import MarketoClientFactory
import os
import sys #@UnusedImport
import time #@UnusedImport
import datetime #@UnusedImport
from pprint import pprint #@UnresolvedImport
TESTDIR = os.path.split(__file__)[0]
PACKAGEDIR = os.path.join(TESTDIR,"..")
INIFILE = os.path.join(PACKAGEDIR,"marketo.ini")
DATAFILES=["specification","listMObjects"]
# The following must be set up on your marketo account to enable tests
LEADEMAIL = "seant@webreply.com" # Email of an internal contact
LEADLIST = "2wr-0" # List name containing LEADEMAIL contact
SPECIALCODE = "WebReplyJobCode" # If your leads have a custom field that can be
SPECIALVALUE= "WEBREPLY" # asserted for LEADEMAIL, set them here
TESTCAMPAIGN = "SOAP API Access test" # Name of test campaign that has SOAP API trigger enabled
DELETECAMPAIGN = "Delete lead" # Campaign configure to delete leads added to the campaign
# First and last names, and synthetic email addresses for new leads
# These will be added and then deleted
TESTDOMAIN="webreply.com"
TESTNAMES = [("One","Test",TESTDOMAIN),("Two","Test",TESTDOMAIN)]
TESTEMAILS = ["%s.%s@%s" % name for name in TESTNAMES]
mc = MarketoClientFactory(INIFILE)
def compareData(datafile, data):
path = os.path.join(TESTDIR,datafile+".txt")
return open(path).read().strip() == data.strip()
def test_data():
"Make sure that all the test data files are present"
assert os.path.exists(INIFILE)
for datafile in DATAFILES:
assert os.path.exists(os.path.join(TESTDIR,datafile+".txt"))
# Factory methods to build structures for arguments
def aStringArray(strings):
|
def aLeadKey(email=None,id=None):
leadkey = mc.factory.create("LeadKey")
if email:
leadkey.keyType = "EMAIL"
leadkey.keyValue = email
elif id:
leadkey.keyType = "IDNUM"
leadkey.keyValue = id
return leadkey
def aLeadKeyArray(leads):
lka = mc.factory.create("ArrayOfLeadKey")
lka.leadKey = leads
return lka
def aListKey(lk, keyType = "MKTOLISTNAME"):
listkey = mc.factory.create("ListKey")
listkey.keyType = keyType
listkey.keyValue = lk
return listkey
def anAttrib(**kwargs):
attrib = mc.factory.create("Attrib")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttribArray(attribs):
aa = mc.factory.create("ArrayOfAttrib")
aa.attrib=attribs
return aa
def anAttribute(**kwargs):
attrib = mc.factory.create("Attribute")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttributeArray(attributes):
aa = mc.factory.create("ArrayOfAttribute")
aa.attribute=attributes
return aa
def aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):
lr = mc.factory.create("LeadRecord")
if id:
lr.Id = id
elif email:
lr.Email = email
elif foreignsyspersonid:
assert foreignsystype
lr.ForeignSysPersonId = foreignsyspersonid
lr.ForeignSysType = foreignsystype
if attributes:
lr.leadAttributeList = attributes
return lr
def aLeadRecordArray(leadrecords):
lra = mc.factory.create("ArrayOfLeadRecord")
lra.leadRecord = leadrecords
return lra
# Several things come back with an attribute list that is more pleasant as a dictionary
def attrs2dict(attributelist):
if attributelist is None:
return {}
attributelist = attributelist[0]
d = dict([(attr.attrName,attr.attrValue) for attr in attributelist])
return d
def dict2attrs(d):
al = []
for key, value in d.items():
al.append(anAttribute(attrName=key,attrValue=value))
return anAttributeArray(al)
def test_specification():
compareData("specification", str(mc))
# As of 1.7, these are the methods
# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )
# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )
# Tested: describeMObject(xs:string objectName, )
# Requires having a trigger set for the campaign, from Marketo support:
# Your SOAP request is fine. In order for the getCampaignsForSource call to work,
# you must have a "Campaign is Requested" trigger in the your campaign set to Web Service API.
# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )
# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )
# Tested: getLead(LeadKey leadKey, )
# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )
# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )
# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )
# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )
# Tested: listMObjects()
# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )
# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )
# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )
# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )
# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )
# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )
# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )
# Campaign sources
# <xs:enumeration value="MKTOWS"/>
# <xs:enumeration value="SALES"/>
def test_getCampaignsForSource():
print "Testing getCampaignsForSource"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
resultCount = campaigns.returnCount
campaignrecords = campaigns.campaignRecordList[0]
assert resultCount==len(campaignrecords), "Result count '%s' does not match campaign list '%s'" % (resultCount, len(campaigns))
for campaign in campaignrecords:
print campaign.id, campaign.name, campaign.description
print
def test_getLead():
print "Testing getLead"
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
attrs = attrs2dict(lead.leadAttributeList)
print lead.Id, lead.Email
pprint(attrs)
if SPECIALCODE and SPECIALVALUE:
assert attrs[SPECIALCODE] == SPECIALVALUE
print
# As of 1.7, theses are the activity types
# <xs:enumeration value="VisitWebpage"/>
# <xs:enumeration value="FillOutForm"/>
# <xs:enumeration value="ClickLink"/>
# <xs:enumeration value="RegisterForEvent"/>
# <xs:enumeration value="AttendEvent"/>
# <xs:enumeration value="SendEmail"/>
# <xs:enumeration value="EmailDelivered"/>
# <xs:enumeration value="EmailBounced"/>
# <xs:enumeration value="UnsubscribeEmail"/>
# <xs:enumeration value="OpenEmail"/>
# <xs:enumeration value="ClickEmail"/>
# <xs:enumeration value="NewLead"/>
# <xs:enumeration value="ChangeDataValue"/>
# <xs:enumeration value="LeadAssigned"/>
# <xs:enumeration value="NewSFDCOpprtnty"/>
# <xs:enumeration value="Wait"/>
# <xs:enumeration value="RunSubflow"/>
# <xs:enumeration value="RemoveFromFlow"/>
# <xs:enumeration value="PushLeadToSales"/>
# <xs:enumeration value="CreateTask"/>
# <xs:enumeration value="ConvertLead"/>
# <xs:enumeration value="ChangeScore"/>
# <xs:enumeration value="ChangeOwner"/>
# <xs:enumeration value="AddToList"/>
# <xs:enumeration value="RemoveFromList"/>
# <xs:enumeration value="SFDCActivity"/>
# <xs:enumeration value="EmailBouncedSoft"/>
# <xs:enumeration value="PushLeadUpdatesToSales"/>
# <xs:enumeration value="DeleteLeadFromSales"/>
# <xs:enumeration value="SFDCActivityUpdated"/>
# <xs:enumeration value="SFDCMergeLeads"/>
# <xs:enumeration value="MergeLeads"/>
# <xs:enumeration value="ResolveConflicts"/>
# <xs:enumeration value="AssocWithOpprtntyInSales"/>
# <xs:enumeration value="DissocFromOpprtntyInSales"/>
# <xs:enumeration value="UpdateOpprtntyInSales"/>
# <xs:enumeration value="DeleteLead"/>
# <xs:enumeration value="SendAlert"/>
# <xs:enumeration value="SendSalesEmail"/>
# <xs:enumeration value="OpenSalesEmail"/>
# <xs:enumeration value="ClickSalesEmail"/>
# <xs:enumeration value="AddtoSFDCCampaign"/>
# <xs:enumeration value="RemoveFromSFDCCampaign"/>
# <xs:enumeration value="ChangeStatusInSFDCCampaign"/>
# <xs:enumeration value="ReceiveSalesEmail"/>
# <xs:enumeration value="InterestingMoment"/>
# <xs:enumeration value="RequestCampaign"/>
# <xs:enumeration value="SalesEmailBounced"/>
# <xs:enumeration value="ChangeLeadPartition"/>
# <xs:enumeration value="ChangeRevenueStage"/>
# <xs:enumeration value="ChangeRevenueStageManually"/>
# <xs:enumeration value="ComputeDataValue"/>
# <xs:enumeration value="ChangeStatusInProgression"/>
# <xs:enumeration value="ChangeFieldInProgram"/>
# <xs:enumeration value="EnrichWithJigsaw"/>
def test_getLeadActivity():
print "Testing getLeadActivity"
leadkey = aLeadKey(email=LEADEMAIL)
activities = mc.service.getLeadActivity(leadkey,"")
assert activities.returnCount > 0
activityrecords = activities.activityRecordList[0]
assert len(activityrecords) == activities.returnCount
for activity in activityrecords:
print "Activity", activity.activityDateTime,activity.activityType
attrs = attrs2dict(activity.activityAttributes)
pprint(attrs)
print
def test_requestCampaign():
print "Testing requestCampaign"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == TESTCAMPAIGN:
print "Found", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
leadid = lead.Id
# Add key appears to want ID
leadkey = aLeadKey(id=leadid)
lka = aLeadKeyArray([leadkey])
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
assert result.success
print
def test_deleteLeads():
# Depends on a campaign that deletes leads as they ar added
# We also need to know the IDNUM for the contacts
lka = []
for email in TESTEMAILS:
leadkey = aLeadKey(email=email)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
lka.append(aLeadKey(id=lead.Id))
print "Found lead", lead.Id, lead.Email
lka = aLeadKeyArray(lka)
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == DELETECAMPAIGN:
print "Found campaign", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
print result
def test_getLeadChanges():
print "Testing getLeadChanges"
since = datetime.datetime(year=2010,month=1, day=1)
changes = mc.service.getLeadChanges("",since,10)
assert changes.returnCount == 10
changerecords = changes.leadChangeRecordList[0]
assert len(changerecords) == changes.returnCount
for change in changerecords:
print "leadChange", change.activityDateTime,change.activityType
pprint(attrs2dict(change.activityAttributes))
print
def test_getMultipleLeads():
print "Testing getMultipleLeads"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
def test_getMultipleLeadsUnsubscribedFlag():
print "Testing getMultipleLeadsUnsubscribedFlag"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
attributelist = aStringArray(["Suppressed"])
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
# Valid list operations as of 1.7
# <xs:enumeration value="ADDTOLIST"/>
# <xs:enumeration value="ISMEMBEROFLIST"/>
# <xs:enumeration value="REMOVEFROMLIST"/>
# Valid list types
# <xs:enumeration value="MKTOLISTNAME"/>
# <xs:enumeration value="MKTOSALESUSERID"/>
# <xs:enumeration value="SFDCLEADOWNERID"/>
def test_listOperation():
print "Testing listOperation"
# Require numeric id fields
leadkey = aLeadKey(id=1256) # Is member
leadkey2 = aLeadKey(id=1) # Is not member
result = mc.service.listOperation("ISMEMBEROFLIST",aListKey(LEADLIST),
aLeadKeyArray([leadkey,leadkey2]),True)
print "listOperation", result
def test_syncLead():
print "Testing syncLead"
# This test does a create the first time only.
# The name and email are used in the "standard" marketo API examples
attrs = dict(FirstName="Sam",LastName="Haggy")
leadrecord = aLeadRecord(email="shaggy@marketo.com",attributes=dict2attrs(attrs))
result = mc.service.syncLead(leadrecord, True, None)
print result.leadId, result.syncStatus.status
def test_syncMultipleLeads():
print "Testing syncMultipleLeads"
leadrecords = []
for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):
leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))
leadrecords.append(leadrecord)
lra = aLeadRecordArray(leadrecords)
print lra
result = mc.service.syncMultipleLeads(lra)
print result
print
def test_listMObjects():
print "Testing listMObjects"
mobjects = mc.service.listMObjects()
compareData("listMObjects", str(mobjects))
print
def test_describeMObject():
print "Testing describeMObject"
mobjects = ["ActivityRecord","LeadRecord","Opportunity","OpportunityPersonRole",]
descriptions = []
for mobject in mobjects:
descriptions.append(str(mc.service.describeMObject(mobject)))
descriptions = "\n".join(descriptions)
compareData("describeMObjects", descriptions)
print
if __name__ == "__main__":
test_data()
test_specification()
test_getLead()
test_getCampaignsForSource()
test_requestCampaign()
test_getLeadActivity()
test_getLeadChanges()
test_listMObjects()
test_describeMObject()
test_getLeadActivity()
test_getMultipleLeads()
test_getMultipleLeadsUnsubscribedFlag()
test_listOperation()
test_syncLead()
test_syncMultipleLeads()
test_deleteLeads()
print "All is well"
| asa = mc.factory.create("ArrayOfString")
asa.stringItem = strings
return asa | identifier_body |
simpletests.py | from pymarketo.client import MarketoClientFactory
import os
import sys #@UnusedImport
import time #@UnusedImport
import datetime #@UnusedImport
from pprint import pprint #@UnresolvedImport
TESTDIR = os.path.split(__file__)[0]
PACKAGEDIR = os.path.join(TESTDIR,"..")
INIFILE = os.path.join(PACKAGEDIR,"marketo.ini")
DATAFILES=["specification","listMObjects"]
# The following must be set up on your marketo account to enable tests
LEADEMAIL = "seant@webreply.com" # Email of an internal contact
LEADLIST = "2wr-0" # List name containing LEADEMAIL contact
SPECIALCODE = "WebReplyJobCode" # If your leads have a custom field that can be
SPECIALVALUE= "WEBREPLY" # asserted for LEADEMAIL, set them here
TESTCAMPAIGN = "SOAP API Access test" # Name of test campaign that has SOAP API trigger enabled
DELETECAMPAIGN = "Delete lead" # Campaign configure to delete leads added to the campaign
# First and last names, and synthetic email addresses for new leads
# These will be added and then deleted
TESTDOMAIN="webreply.com"
TESTNAMES = [("One","Test",TESTDOMAIN),("Two","Test",TESTDOMAIN)]
TESTEMAILS = ["%s.%s@%s" % name for name in TESTNAMES]
mc = MarketoClientFactory(INIFILE)
def compareData(datafile, data):
path = os.path.join(TESTDIR,datafile+".txt")
return open(path).read().strip() == data.strip()
def test_data():
"Make sure that all the test data files are present"
assert os.path.exists(INIFILE)
for datafile in DATAFILES:
assert os.path.exists(os.path.join(TESTDIR,datafile+".txt"))
# Factory methods to build structures for arguments
def aStringArray(strings):
asa = mc.factory.create("ArrayOfString")
asa.stringItem = strings
return asa
def aLeadKey(email=None,id=None):
leadkey = mc.factory.create("LeadKey")
if email:
leadkey.keyType = "EMAIL"
leadkey.keyValue = email
elif id:
leadkey.keyType = "IDNUM"
leadkey.keyValue = id
return leadkey
def aLeadKeyArray(leads):
lka = mc.factory.create("ArrayOfLeadKey")
lka.leadKey = leads
return lka
def aListKey(lk, keyType = "MKTOLISTNAME"):
listkey = mc.factory.create("ListKey")
listkey.keyType = keyType
listkey.keyValue = lk
return listkey
def anAttrib(**kwargs):
attrib = mc.factory.create("Attrib")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttribArray(attribs):
aa = mc.factory.create("ArrayOfAttrib")
aa.attrib=attribs
return aa
def anAttribute(**kwargs):
attrib = mc.factory.create("Attribute")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttributeArray(attributes):
aa = mc.factory.create("ArrayOfAttribute")
aa.attribute=attributes
return aa
def aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):
lr = mc.factory.create("LeadRecord")
if id:
lr.Id = id
elif email:
lr.Email = email
elif foreignsyspersonid:
assert foreignsystype
lr.ForeignSysPersonId = foreignsyspersonid
lr.ForeignSysType = foreignsystype
if attributes:
lr.leadAttributeList = attributes
return lr
def aLeadRecordArray(leadrecords):
lra = mc.factory.create("ArrayOfLeadRecord")
lra.leadRecord = leadrecords
return lra
# Several things come back with an attribute list that is more pleasant as a dictionary
def attrs2dict(attributelist):
if attributelist is None:
return {}
attributelist = attributelist[0]
d = dict([(attr.attrName,attr.attrValue) for attr in attributelist])
return d
def dict2attrs(d):
al = []
for key, value in d.items():
al.append(anAttribute(attrName=key,attrValue=value))
return anAttributeArray(al)
def test_specification():
compareData("specification", str(mc))
# As of 1.7, these are the methods
# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )
# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )
# Tested: describeMObject(xs:string objectName, )
# Requires having a trigger set for the campaign, from Marketo support:
# Your SOAP request is fine. In order for the getCampaignsForSource call to work,
# you must have a "Campaign is Requested" trigger in the your campaign set to Web Service API.
# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )
# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )
# Tested: getLead(LeadKey leadKey, )
# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )
# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )
# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )
# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )
# Tested: listMObjects()
# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )
# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )
# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )
# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )
# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )
# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )
# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )
# Campaign sources
# <xs:enumeration value="MKTOWS"/>
# <xs:enumeration value="SALES"/>
def test_getCampaignsForSource():
print "Testing getCampaignsForSource"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
resultCount = campaigns.returnCount
campaignrecords = campaigns.campaignRecordList[0]
assert resultCount==len(campaignrecords), "Result count '%s' does not match campaign list '%s'" % (resultCount, len(campaigns))
for campaign in campaignrecords:
print campaign.id, campaign.name, campaign.description
print
def test_getLead():
print "Testing getLead"
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
attrs = attrs2dict(lead.leadAttributeList)
print lead.Id, lead.Email
pprint(attrs)
if SPECIALCODE and SPECIALVALUE:
assert attrs[SPECIALCODE] == SPECIALVALUE
print
# As of 1.7, theses are the activity types
# <xs:enumeration value="VisitWebpage"/>
# <xs:enumeration value="FillOutForm"/>
# <xs:enumeration value="ClickLink"/>
# <xs:enumeration value="RegisterForEvent"/> | # <xs:enumeration value="OpenEmail"/>
# <xs:enumeration value="ClickEmail"/>
# <xs:enumeration value="NewLead"/>
# <xs:enumeration value="ChangeDataValue"/>
# <xs:enumeration value="LeadAssigned"/>
# <xs:enumeration value="NewSFDCOpprtnty"/>
# <xs:enumeration value="Wait"/>
# <xs:enumeration value="RunSubflow"/>
# <xs:enumeration value="RemoveFromFlow"/>
# <xs:enumeration value="PushLeadToSales"/>
# <xs:enumeration value="CreateTask"/>
# <xs:enumeration value="ConvertLead"/>
# <xs:enumeration value="ChangeScore"/>
# <xs:enumeration value="ChangeOwner"/>
# <xs:enumeration value="AddToList"/>
# <xs:enumeration value="RemoveFromList"/>
# <xs:enumeration value="SFDCActivity"/>
# <xs:enumeration value="EmailBouncedSoft"/>
# <xs:enumeration value="PushLeadUpdatesToSales"/>
# <xs:enumeration value="DeleteLeadFromSales"/>
# <xs:enumeration value="SFDCActivityUpdated"/>
# <xs:enumeration value="SFDCMergeLeads"/>
# <xs:enumeration value="MergeLeads"/>
# <xs:enumeration value="ResolveConflicts"/>
# <xs:enumeration value="AssocWithOpprtntyInSales"/>
# <xs:enumeration value="DissocFromOpprtntyInSales"/>
# <xs:enumeration value="UpdateOpprtntyInSales"/>
# <xs:enumeration value="DeleteLead"/>
# <xs:enumeration value="SendAlert"/>
# <xs:enumeration value="SendSalesEmail"/>
# <xs:enumeration value="OpenSalesEmail"/>
# <xs:enumeration value="ClickSalesEmail"/>
# <xs:enumeration value="AddtoSFDCCampaign"/>
# <xs:enumeration value="RemoveFromSFDCCampaign"/>
# <xs:enumeration value="ChangeStatusInSFDCCampaign"/>
# <xs:enumeration value="ReceiveSalesEmail"/>
# <xs:enumeration value="InterestingMoment"/>
# <xs:enumeration value="RequestCampaign"/>
# <xs:enumeration value="SalesEmailBounced"/>
# <xs:enumeration value="ChangeLeadPartition"/>
# <xs:enumeration value="ChangeRevenueStage"/>
# <xs:enumeration value="ChangeRevenueStageManually"/>
# <xs:enumeration value="ComputeDataValue"/>
# <xs:enumeration value="ChangeStatusInProgression"/>
# <xs:enumeration value="ChangeFieldInProgram"/>
# <xs:enumeration value="EnrichWithJigsaw"/>
def test_getLeadActivity():
print "Testing getLeadActivity"
leadkey = aLeadKey(email=LEADEMAIL)
activities = mc.service.getLeadActivity(leadkey,"")
assert activities.returnCount > 0
activityrecords = activities.activityRecordList[0]
assert len(activityrecords) == activities.returnCount
for activity in activityrecords:
print "Activity", activity.activityDateTime,activity.activityType
attrs = attrs2dict(activity.activityAttributes)
pprint(attrs)
print
def test_requestCampaign():
print "Testing requestCampaign"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == TESTCAMPAIGN:
print "Found", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
leadid = lead.Id
# Add key appears to want ID
leadkey = aLeadKey(id=leadid)
lka = aLeadKeyArray([leadkey])
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
assert result.success
print
def test_deleteLeads():
# Depends on a campaign that deletes leads as they ar added
# We also need to know the IDNUM for the contacts
lka = []
for email in TESTEMAILS:
leadkey = aLeadKey(email=email)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
lka.append(aLeadKey(id=lead.Id))
print "Found lead", lead.Id, lead.Email
lka = aLeadKeyArray(lka)
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == DELETECAMPAIGN:
print "Found campaign", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
print result
def test_getLeadChanges():
print "Testing getLeadChanges"
since = datetime.datetime(year=2010,month=1, day=1)
changes = mc.service.getLeadChanges("",since,10)
assert changes.returnCount == 10
changerecords = changes.leadChangeRecordList[0]
assert len(changerecords) == changes.returnCount
for change in changerecords:
print "leadChange", change.activityDateTime,change.activityType
pprint(attrs2dict(change.activityAttributes))
print
def test_getMultipleLeads():
print "Testing getMultipleLeads"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
def test_getMultipleLeadsUnsubscribedFlag():
print "Testing getMultipleLeadsUnsubscribedFlag"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
attributelist = aStringArray(["Suppressed"])
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
# Valid list operations as of 1.7
# <xs:enumeration value="ADDTOLIST"/>
# <xs:enumeration value="ISMEMBEROFLIST"/>
# <xs:enumeration value="REMOVEFROMLIST"/>
# Valid list types
# <xs:enumeration value="MKTOLISTNAME"/>
# <xs:enumeration value="MKTOSALESUSERID"/>
# <xs:enumeration value="SFDCLEADOWNERID"/>
def test_listOperation():
print "Testing listOperation"
# Require numeric id fields
leadkey = aLeadKey(id=1256) # Is member
leadkey2 = aLeadKey(id=1) # Is not member
result = mc.service.listOperation("ISMEMBEROFLIST",aListKey(LEADLIST),
aLeadKeyArray([leadkey,leadkey2]),True)
print "listOperation", result
def test_syncLead():
print "Testing syncLead"
# This test does a create the first time only.
# The name and email are used in the "standard" marketo API examples
attrs = dict(FirstName="Sam",LastName="Haggy")
leadrecord = aLeadRecord(email="shaggy@marketo.com",attributes=dict2attrs(attrs))
result = mc.service.syncLead(leadrecord, True, None)
print result.leadId, result.syncStatus.status
def test_syncMultipleLeads():
print "Testing syncMultipleLeads"
leadrecords = []
for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):
leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))
leadrecords.append(leadrecord)
lra = aLeadRecordArray(leadrecords)
print lra
result = mc.service.syncMultipleLeads(lra)
print result
print
def test_listMObjects():
print "Testing listMObjects"
mobjects = mc.service.listMObjects()
compareData("listMObjects", str(mobjects))
print
def test_describeMObject():
print "Testing describeMObject"
mobjects = ["ActivityRecord","LeadRecord","Opportunity","OpportunityPersonRole",]
descriptions = []
for mobject in mobjects:
descriptions.append(str(mc.service.describeMObject(mobject)))
descriptions = "\n".join(descriptions)
compareData("describeMObjects", descriptions)
print
if __name__ == "__main__":
test_data()
test_specification()
test_getLead()
test_getCampaignsForSource()
test_requestCampaign()
test_getLeadActivity()
test_getLeadChanges()
test_listMObjects()
test_describeMObject()
test_getLeadActivity()
test_getMultipleLeads()
test_getMultipleLeadsUnsubscribedFlag()
test_listOperation()
test_syncLead()
test_syncMultipleLeads()
test_deleteLeads()
print "All is well" | # <xs:enumeration value="AttendEvent"/>
# <xs:enumeration value="SendEmail"/>
# <xs:enumeration value="EmailDelivered"/>
# <xs:enumeration value="EmailBounced"/>
# <xs:enumeration value="UnsubscribeEmail"/> | random_line_split |
simpletests.py | from pymarketo.client import MarketoClientFactory
import os
import sys #@UnusedImport
import time #@UnusedImport
import datetime #@UnusedImport
from pprint import pprint #@UnresolvedImport
TESTDIR = os.path.split(__file__)[0]
PACKAGEDIR = os.path.join(TESTDIR,"..")
INIFILE = os.path.join(PACKAGEDIR,"marketo.ini")
DATAFILES=["specification","listMObjects"]
# The following must be set up on your marketo account to enable tests
LEADEMAIL = "seant@webreply.com" # Email of an internal contact
LEADLIST = "2wr-0" # List name containing LEADEMAIL contact
SPECIALCODE = "WebReplyJobCode" # If your leads have a custom field that can be
SPECIALVALUE= "WEBREPLY" # asserted for LEADEMAIL, set them here
TESTCAMPAIGN = "SOAP API Access test" # Name of test campaign that has SOAP API trigger enabled
DELETECAMPAIGN = "Delete lead" # Campaign configure to delete leads added to the campaign
# First and last names, and synthetic email addresses for new leads
# These will be added and then deleted
TESTDOMAIN="webreply.com"
TESTNAMES = [("One","Test",TESTDOMAIN),("Two","Test",TESTDOMAIN)]
TESTEMAILS = ["%s.%s@%s" % name for name in TESTNAMES]
mc = MarketoClientFactory(INIFILE)
def compareData(datafile, data):
path = os.path.join(TESTDIR,datafile+".txt")
return open(path).read().strip() == data.strip()
def test_data():
"Make sure that all the test data files are present"
assert os.path.exists(INIFILE)
for datafile in DATAFILES:
assert os.path.exists(os.path.join(TESTDIR,datafile+".txt"))
# Factory methods to build structures for arguments
def aStringArray(strings):
asa = mc.factory.create("ArrayOfString")
asa.stringItem = strings
return asa
def aLeadKey(email=None,id=None):
leadkey = mc.factory.create("LeadKey")
if email:
leadkey.keyType = "EMAIL"
leadkey.keyValue = email
elif id:
leadkey.keyType = "IDNUM"
leadkey.keyValue = id
return leadkey
def aLeadKeyArray(leads):
lka = mc.factory.create("ArrayOfLeadKey")
lka.leadKey = leads
return lka
def aListKey(lk, keyType = "MKTOLISTNAME"):
listkey = mc.factory.create("ListKey")
listkey.keyType = keyType
listkey.keyValue = lk
return listkey
def anAttrib(**kwargs):
attrib = mc.factory.create("Attrib")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttribArray(attribs):
aa = mc.factory.create("ArrayOfAttrib")
aa.attrib=attribs
return aa
def anAttribute(**kwargs):
attrib = mc.factory.create("Attribute")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttributeArray(attributes):
aa = mc.factory.create("ArrayOfAttribute")
aa.attribute=attributes
return aa
def aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):
lr = mc.factory.create("LeadRecord")
if id:
lr.Id = id
elif email:
lr.Email = email
elif foreignsyspersonid:
assert foreignsystype
lr.ForeignSysPersonId = foreignsyspersonid
lr.ForeignSysType = foreignsystype
if attributes:
lr.leadAttributeList = attributes
return lr
def aLeadRecordArray(leadrecords):
lra = mc.factory.create("ArrayOfLeadRecord")
lra.leadRecord = leadrecords
return lra
# Several things come back with an attribute list that is more pleasant as a dictionary
def attrs2dict(attributelist):
if attributelist is None:
return {}
attributelist = attributelist[0]
d = dict([(attr.attrName,attr.attrValue) for attr in attributelist])
return d
def dict2attrs(d):
al = []
for key, value in d.items():
al.append(anAttribute(attrName=key,attrValue=value))
return anAttributeArray(al)
def test_specification():
compareData("specification", str(mc))
# As of 1.7, these are the methods
# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )
# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )
# Tested: describeMObject(xs:string objectName, )
# Requires having a trigger set for the campaign, from Marketo support:
# Your SOAP request is fine. In order for the getCampaignsForSource call to work,
# you must have a "Campaign is Requested" trigger in the your campaign set to Web Service API.
# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )
# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )
# Tested: getLead(LeadKey leadKey, )
# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )
# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )
# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )
# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )
# Tested: listMObjects()
# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )
# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )
# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )
# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )
# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )
# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )
# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )
# Campaign sources
# <xs:enumeration value="MKTOWS"/>
# <xs:enumeration value="SALES"/>
def test_getCampaignsForSource():
print "Testing getCampaignsForSource"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
resultCount = campaigns.returnCount
campaignrecords = campaigns.campaignRecordList[0]
assert resultCount==len(campaignrecords), "Result count '%s' does not match campaign list '%s'" % (resultCount, len(campaigns))
for campaign in campaignrecords:
print campaign.id, campaign.name, campaign.description
print
def test_getLead():
print "Testing getLead"
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
attrs = attrs2dict(lead.leadAttributeList)
print lead.Id, lead.Email
pprint(attrs)
if SPECIALCODE and SPECIALVALUE:
assert attrs[SPECIALCODE] == SPECIALVALUE
print
# As of 1.7, theses are the activity types
# <xs:enumeration value="VisitWebpage"/>
# <xs:enumeration value="FillOutForm"/>
# <xs:enumeration value="ClickLink"/>
# <xs:enumeration value="RegisterForEvent"/>
# <xs:enumeration value="AttendEvent"/>
# <xs:enumeration value="SendEmail"/>
# <xs:enumeration value="EmailDelivered"/>
# <xs:enumeration value="EmailBounced"/>
# <xs:enumeration value="UnsubscribeEmail"/>
# <xs:enumeration value="OpenEmail"/>
# <xs:enumeration value="ClickEmail"/>
# <xs:enumeration value="NewLead"/>
# <xs:enumeration value="ChangeDataValue"/>
# <xs:enumeration value="LeadAssigned"/>
# <xs:enumeration value="NewSFDCOpprtnty"/>
# <xs:enumeration value="Wait"/>
# <xs:enumeration value="RunSubflow"/>
# <xs:enumeration value="RemoveFromFlow"/>
# <xs:enumeration value="PushLeadToSales"/>
# <xs:enumeration value="CreateTask"/>
# <xs:enumeration value="ConvertLead"/>
# <xs:enumeration value="ChangeScore"/>
# <xs:enumeration value="ChangeOwner"/>
# <xs:enumeration value="AddToList"/>
# <xs:enumeration value="RemoveFromList"/>
# <xs:enumeration value="SFDCActivity"/>
# <xs:enumeration value="EmailBouncedSoft"/>
# <xs:enumeration value="PushLeadUpdatesToSales"/>
# <xs:enumeration value="DeleteLeadFromSales"/>
# <xs:enumeration value="SFDCActivityUpdated"/>
# <xs:enumeration value="SFDCMergeLeads"/>
# <xs:enumeration value="MergeLeads"/>
# <xs:enumeration value="ResolveConflicts"/>
# <xs:enumeration value="AssocWithOpprtntyInSales"/>
# <xs:enumeration value="DissocFromOpprtntyInSales"/>
# <xs:enumeration value="UpdateOpprtntyInSales"/>
# <xs:enumeration value="DeleteLead"/>
# <xs:enumeration value="SendAlert"/>
# <xs:enumeration value="SendSalesEmail"/>
# <xs:enumeration value="OpenSalesEmail"/>
# <xs:enumeration value="ClickSalesEmail"/>
# <xs:enumeration value="AddtoSFDCCampaign"/>
# <xs:enumeration value="RemoveFromSFDCCampaign"/>
# <xs:enumeration value="ChangeStatusInSFDCCampaign"/>
# <xs:enumeration value="ReceiveSalesEmail"/>
# <xs:enumeration value="InterestingMoment"/>
# <xs:enumeration value="RequestCampaign"/>
# <xs:enumeration value="SalesEmailBounced"/>
# <xs:enumeration value="ChangeLeadPartition"/>
# <xs:enumeration value="ChangeRevenueStage"/>
# <xs:enumeration value="ChangeRevenueStageManually"/>
# <xs:enumeration value="ComputeDataValue"/>
# <xs:enumeration value="ChangeStatusInProgression"/>
# <xs:enumeration value="ChangeFieldInProgram"/>
# <xs:enumeration value="EnrichWithJigsaw"/>
def test_getLeadActivity():
print "Testing getLeadActivity"
leadkey = aLeadKey(email=LEADEMAIL)
activities = mc.service.getLeadActivity(leadkey,"")
assert activities.returnCount > 0
activityrecords = activities.activityRecordList[0]
assert len(activityrecords) == activities.returnCount
for activity in activityrecords:
print "Activity", activity.activityDateTime,activity.activityType
attrs = attrs2dict(activity.activityAttributes)
pprint(attrs)
print
def test_requestCampaign():
print "Testing requestCampaign"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == TESTCAMPAIGN:
print "Found", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
leadid = lead.Id
# Add key appears to want ID
leadkey = aLeadKey(id=leadid)
lka = aLeadKeyArray([leadkey])
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
assert result.success
print
def test_deleteLeads():
# Depends on a campaign that deletes leads as they ar added
# We also need to know the IDNUM for the contacts
lka = []
for email in TESTEMAILS:
leadkey = aLeadKey(email=email)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
lka.append(aLeadKey(id=lead.Id))
print "Found lead", lead.Id, lead.Email
lka = aLeadKeyArray(lka)
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == DELETECAMPAIGN:
print "Found campaign", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
print result
def test_getLeadChanges():
print "Testing getLeadChanges"
since = datetime.datetime(year=2010,month=1, day=1)
changes = mc.service.getLeadChanges("",since,10)
assert changes.returnCount == 10
changerecords = changes.leadChangeRecordList[0]
assert len(changerecords) == changes.returnCount
for change in changerecords:
print "leadChange", change.activityDateTime,change.activityType
pprint(attrs2dict(change.activityAttributes))
print
def test_getMultipleLeads():
print "Testing getMultipleLeads"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
def test_getMultipleLeadsUnsubscribedFlag():
print "Testing getMultipleLeadsUnsubscribedFlag"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
attributelist = aStringArray(["Suppressed"])
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
# Valid list operations as of 1.7
# <xs:enumeration value="ADDTOLIST"/>
# <xs:enumeration value="ISMEMBEROFLIST"/>
# <xs:enumeration value="REMOVEFROMLIST"/>
# Valid list types
# <xs:enumeration value="MKTOLISTNAME"/>
# <xs:enumeration value="MKTOSALESUSERID"/>
# <xs:enumeration value="SFDCLEADOWNERID"/>
def | ():
print "Testing listOperation"
# Require numeric id fields
leadkey = aLeadKey(id=1256) # Is member
leadkey2 = aLeadKey(id=1) # Is not member
result = mc.service.listOperation("ISMEMBEROFLIST",aListKey(LEADLIST),
aLeadKeyArray([leadkey,leadkey2]),True)
print "listOperation", result
def test_syncLead():
print "Testing syncLead"
# This test does a create the first time only.
# The name and email are used in the "standard" marketo API examples
attrs = dict(FirstName="Sam",LastName="Haggy")
leadrecord = aLeadRecord(email="shaggy@marketo.com",attributes=dict2attrs(attrs))
result = mc.service.syncLead(leadrecord, True, None)
print result.leadId, result.syncStatus.status
def test_syncMultipleLeads():
print "Testing syncMultipleLeads"
leadrecords = []
for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):
leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))
leadrecords.append(leadrecord)
lra = aLeadRecordArray(leadrecords)
print lra
result = mc.service.syncMultipleLeads(lra)
print result
print
def test_listMObjects():
print "Testing listMObjects"
mobjects = mc.service.listMObjects()
compareData("listMObjects", str(mobjects))
print
def test_describeMObject():
print "Testing describeMObject"
mobjects = ["ActivityRecord","LeadRecord","Opportunity","OpportunityPersonRole",]
descriptions = []
for mobject in mobjects:
descriptions.append(str(mc.service.describeMObject(mobject)))
descriptions = "\n".join(descriptions)
compareData("describeMObjects", descriptions)
print
if __name__ == "__main__":
test_data()
test_specification()
test_getLead()
test_getCampaignsForSource()
test_requestCampaign()
test_getLeadActivity()
test_getLeadChanges()
test_listMObjects()
test_describeMObject()
test_getLeadActivity()
test_getMultipleLeads()
test_getMultipleLeadsUnsubscribedFlag()
test_listOperation()
test_syncLead()
test_syncMultipleLeads()
test_deleteLeads()
print "All is well"
| test_listOperation | identifier_name |
simpletests.py | from pymarketo.client import MarketoClientFactory
import os
import sys #@UnusedImport
import time #@UnusedImport
import datetime #@UnusedImport
from pprint import pprint #@UnresolvedImport
TESTDIR = os.path.split(__file__)[0]
PACKAGEDIR = os.path.join(TESTDIR,"..")
INIFILE = os.path.join(PACKAGEDIR,"marketo.ini")
DATAFILES=["specification","listMObjects"]
# The following must be set up on your marketo account to enable tests
LEADEMAIL = "seant@webreply.com" # Email of an internal contact
LEADLIST = "2wr-0" # List name containing LEADEMAIL contact
SPECIALCODE = "WebReplyJobCode" # If your leads have a custom field that can be
SPECIALVALUE= "WEBREPLY" # asserted for LEADEMAIL, set them here
TESTCAMPAIGN = "SOAP API Access test" # Name of test campaign that has SOAP API trigger enabled
DELETECAMPAIGN = "Delete lead" # Campaign configure to delete leads added to the campaign
# First and last names, and synthetic email addresses for new leads
# These will be added and then deleted
TESTDOMAIN="webreply.com"
TESTNAMES = [("One","Test",TESTDOMAIN),("Two","Test",TESTDOMAIN)]
TESTEMAILS = ["%s.%s@%s" % name for name in TESTNAMES]
mc = MarketoClientFactory(INIFILE)
def compareData(datafile, data):
path = os.path.join(TESTDIR,datafile+".txt")
return open(path).read().strip() == data.strip()
def test_data():
"Make sure that all the test data files are present"
assert os.path.exists(INIFILE)
for datafile in DATAFILES:
assert os.path.exists(os.path.join(TESTDIR,datafile+".txt"))
# Factory methods to build structures for arguments
def aStringArray(strings):
asa = mc.factory.create("ArrayOfString")
asa.stringItem = strings
return asa
def aLeadKey(email=None,id=None):
leadkey = mc.factory.create("LeadKey")
if email:
leadkey.keyType = "EMAIL"
leadkey.keyValue = email
elif id:
leadkey.keyType = "IDNUM"
leadkey.keyValue = id
return leadkey
def aLeadKeyArray(leads):
lka = mc.factory.create("ArrayOfLeadKey")
lka.leadKey = leads
return lka
def aListKey(lk, keyType = "MKTOLISTNAME"):
listkey = mc.factory.create("ListKey")
listkey.keyType = keyType
listkey.keyValue = lk
return listkey
def anAttrib(**kwargs):
attrib = mc.factory.create("Attrib")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttribArray(attribs):
aa = mc.factory.create("ArrayOfAttrib")
aa.attrib=attribs
return aa
def anAttribute(**kwargs):
attrib = mc.factory.create("Attribute")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttributeArray(attributes):
aa = mc.factory.create("ArrayOfAttribute")
aa.attribute=attributes
return aa
def aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):
lr = mc.factory.create("LeadRecord")
if id:
lr.Id = id
elif email:
lr.Email = email
elif foreignsyspersonid:
assert foreignsystype
lr.ForeignSysPersonId = foreignsyspersonid
lr.ForeignSysType = foreignsystype
if attributes:
lr.leadAttributeList = attributes
return lr
def aLeadRecordArray(leadrecords):
lra = mc.factory.create("ArrayOfLeadRecord")
lra.leadRecord = leadrecords
return lra
# Several things come back with an attribute list that is more pleasant as a dictionary
def attrs2dict(attributelist):
if attributelist is None:
return {}
attributelist = attributelist[0]
d = dict([(attr.attrName,attr.attrValue) for attr in attributelist])
return d
def dict2attrs(d):
al = []
for key, value in d.items():
al.append(anAttribute(attrName=key,attrValue=value))
return anAttributeArray(al)
def test_specification():
compareData("specification", str(mc))
# As of 1.7, these are the methods
# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )
# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )
# Tested: describeMObject(xs:string objectName, )
# Requires having a trigger set for the campaign, from Marketo support:
# Your SOAP request is fine. In order for the getCampaignsForSource call to work,
# you must have a "Campaign is Requested" trigger in the your campaign set to Web Service API.
# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )
# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )
# Tested: getLead(LeadKey leadKey, )
# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )
# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )
# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )
# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )
# Tested: listMObjects()
# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )
# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )
# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )
# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )
# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )
# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )
# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )
# Campaign sources
# <xs:enumeration value="MKTOWS"/>
# <xs:enumeration value="SALES"/>
def test_getCampaignsForSource():
print "Testing getCampaignsForSource"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
resultCount = campaigns.returnCount
campaignrecords = campaigns.campaignRecordList[0]
assert resultCount==len(campaignrecords), "Result count '%s' does not match campaign list '%s'" % (resultCount, len(campaigns))
for campaign in campaignrecords:
print campaign.id, campaign.name, campaign.description
print
def test_getLead():
print "Testing getLead"
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
attrs = attrs2dict(lead.leadAttributeList)
print lead.Id, lead.Email
pprint(attrs)
if SPECIALCODE and SPECIALVALUE:
assert attrs[SPECIALCODE] == SPECIALVALUE
print
# As of 1.7, theses are the activity types
# <xs:enumeration value="VisitWebpage"/>
# <xs:enumeration value="FillOutForm"/>
# <xs:enumeration value="ClickLink"/>
# <xs:enumeration value="RegisterForEvent"/>
# <xs:enumeration value="AttendEvent"/>
# <xs:enumeration value="SendEmail"/>
# <xs:enumeration value="EmailDelivered"/>
# <xs:enumeration value="EmailBounced"/>
# <xs:enumeration value="UnsubscribeEmail"/>
# <xs:enumeration value="OpenEmail"/>
# <xs:enumeration value="ClickEmail"/>
# <xs:enumeration value="NewLead"/>
# <xs:enumeration value="ChangeDataValue"/>
# <xs:enumeration value="LeadAssigned"/>
# <xs:enumeration value="NewSFDCOpprtnty"/>
# <xs:enumeration value="Wait"/>
# <xs:enumeration value="RunSubflow"/>
# <xs:enumeration value="RemoveFromFlow"/>
# <xs:enumeration value="PushLeadToSales"/>
# <xs:enumeration value="CreateTask"/>
# <xs:enumeration value="ConvertLead"/>
# <xs:enumeration value="ChangeScore"/>
# <xs:enumeration value="ChangeOwner"/>
# <xs:enumeration value="AddToList"/>
# <xs:enumeration value="RemoveFromList"/>
# <xs:enumeration value="SFDCActivity"/>
# <xs:enumeration value="EmailBouncedSoft"/>
# <xs:enumeration value="PushLeadUpdatesToSales"/>
# <xs:enumeration value="DeleteLeadFromSales"/>
# <xs:enumeration value="SFDCActivityUpdated"/>
# <xs:enumeration value="SFDCMergeLeads"/>
# <xs:enumeration value="MergeLeads"/>
# <xs:enumeration value="ResolveConflicts"/>
# <xs:enumeration value="AssocWithOpprtntyInSales"/>
# <xs:enumeration value="DissocFromOpprtntyInSales"/>
# <xs:enumeration value="UpdateOpprtntyInSales"/>
# <xs:enumeration value="DeleteLead"/>
# <xs:enumeration value="SendAlert"/>
# <xs:enumeration value="SendSalesEmail"/>
# <xs:enumeration value="OpenSalesEmail"/>
# <xs:enumeration value="ClickSalesEmail"/>
# <xs:enumeration value="AddtoSFDCCampaign"/>
# <xs:enumeration value="RemoveFromSFDCCampaign"/>
# <xs:enumeration value="ChangeStatusInSFDCCampaign"/>
# <xs:enumeration value="ReceiveSalesEmail"/>
# <xs:enumeration value="InterestingMoment"/>
# <xs:enumeration value="RequestCampaign"/>
# <xs:enumeration value="SalesEmailBounced"/>
# <xs:enumeration value="ChangeLeadPartition"/>
# <xs:enumeration value="ChangeRevenueStage"/>
# <xs:enumeration value="ChangeRevenueStageManually"/>
# <xs:enumeration value="ComputeDataValue"/>
# <xs:enumeration value="ChangeStatusInProgression"/>
# <xs:enumeration value="ChangeFieldInProgram"/>
# <xs:enumeration value="EnrichWithJigsaw"/>
def test_getLeadActivity():
print "Testing getLeadActivity"
leadkey = aLeadKey(email=LEADEMAIL)
activities = mc.service.getLeadActivity(leadkey,"")
assert activities.returnCount > 0
activityrecords = activities.activityRecordList[0]
assert len(activityrecords) == activities.returnCount
for activity in activityrecords:
print "Activity", activity.activityDateTime,activity.activityType
attrs = attrs2dict(activity.activityAttributes)
pprint(attrs)
print
def test_requestCampaign():
print "Testing requestCampaign"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == TESTCAMPAIGN:
print "Found", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
leadid = lead.Id
# Add key appears to want ID
leadkey = aLeadKey(id=leadid)
lka = aLeadKeyArray([leadkey])
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
assert result.success
print
def test_deleteLeads():
# Depends on a campaign that deletes leads as they ar added
# We also need to know the IDNUM for the contacts
lka = []
for email in TESTEMAILS:
leadkey = aLeadKey(email=email)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
lka.append(aLeadKey(id=lead.Id))
print "Found lead", lead.Id, lead.Email
lka = aLeadKeyArray(lka)
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == DELETECAMPAIGN:
print "Found campaign", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
print result
def test_getLeadChanges():
print "Testing getLeadChanges"
since = datetime.datetime(year=2010,month=1, day=1)
changes = mc.service.getLeadChanges("",since,10)
assert changes.returnCount == 10
changerecords = changes.leadChangeRecordList[0]
assert len(changerecords) == changes.returnCount
for change in changerecords:
print "leadChange", change.activityDateTime,change.activityType
pprint(attrs2dict(change.activityAttributes))
print
def test_getMultipleLeads():
print "Testing getMultipleLeads"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
|
print
def test_getMultipleLeadsUnsubscribedFlag():
print "Testing getMultipleLeadsUnsubscribedFlag"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
attributelist = aStringArray(["Suppressed"])
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
# Valid list operations as of 1.7
# <xs:enumeration value="ADDTOLIST"/>
# <xs:enumeration value="ISMEMBEROFLIST"/>
# <xs:enumeration value="REMOVEFROMLIST"/>
# Valid list types
# <xs:enumeration value="MKTOLISTNAME"/>
# <xs:enumeration value="MKTOSALESUSERID"/>
# <xs:enumeration value="SFDCLEADOWNERID"/>
def test_listOperation():
print "Testing listOperation"
# Require numeric id fields
leadkey = aLeadKey(id=1256) # Is member
leadkey2 = aLeadKey(id=1) # Is not member
result = mc.service.listOperation("ISMEMBEROFLIST",aListKey(LEADLIST),
aLeadKeyArray([leadkey,leadkey2]),True)
print "listOperation", result
def test_syncLead():
print "Testing syncLead"
# This test does a create the first time only.
# The name and email are used in the "standard" marketo API examples
attrs = dict(FirstName="Sam",LastName="Haggy")
leadrecord = aLeadRecord(email="shaggy@marketo.com",attributes=dict2attrs(attrs))
result = mc.service.syncLead(leadrecord, True, None)
print result.leadId, result.syncStatus.status
def test_syncMultipleLeads():
print "Testing syncMultipleLeads"
leadrecords = []
for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):
leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))
leadrecords.append(leadrecord)
lra = aLeadRecordArray(leadrecords)
print lra
result = mc.service.syncMultipleLeads(lra)
print result
print
def test_listMObjects():
print "Testing listMObjects"
mobjects = mc.service.listMObjects()
compareData("listMObjects", str(mobjects))
print
def test_describeMObject():
print "Testing describeMObject"
mobjects = ["ActivityRecord","LeadRecord","Opportunity","OpportunityPersonRole",]
descriptions = []
for mobject in mobjects:
descriptions.append(str(mc.service.describeMObject(mobject)))
descriptions = "\n".join(descriptions)
compareData("describeMObjects", descriptions)
print
if __name__ == "__main__":
test_data()
test_specification()
test_getLead()
test_getCampaignsForSource()
test_requestCampaign()
test_getLeadActivity()
test_getLeadChanges()
test_listMObjects()
test_describeMObject()
test_getLeadActivity()
test_getMultipleLeads()
test_getMultipleLeadsUnsubscribedFlag()
test_listOperation()
test_syncLead()
test_syncMultipleLeads()
test_deleteLeads()
print "All is well"
| attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs) | conditional_block |
post.rs | //! `post` table parsing and writing.
use std::str;
use crate::binary::read::{ReadArray, ReadBinary, ReadCtxt};
use crate::binary::write::{WriteBinary, WriteContext};
use crate::binary::{I16Be, I32Be, U16Be, U32Be, U8};
use crate::error::{ParseError, WriteError};
pub struct PostTable<'a> {
pub header: Header,
pub opt_sub_table: Option<SubTable<'a>>,
}
pub struct Header {
pub version: i32,
pub italic_angle: i32,
pub underline_position: i16,
pub underline_thickness: i16,
pub is_fixed_pitch: u32,
pub min_mem_type_42: u32,
pub max_mem_type_42: u32,
pub min_mem_type_1: u32,
pub max_mem_type_1: u32,
}
pub struct SubTable<'a> {
pub num_glyphs: u16,
pub glyph_name_index: ReadArray<'a, U16Be>,
pub names: Vec<PascalString<'a>>,
}
#[derive(Clone)]
pub struct PascalString<'a> {
pub bytes: &'a [u8],
}
impl ReadBinary for Header {
type HostType<'b> = Self;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self, ParseError> {
let version = ctxt.read_i32be()?;
let italic_angle = ctxt.read_i32be()?;
let underline_position = ctxt.read_i16be()?;
let underline_thickness = ctxt.read_i16be()?;
let is_fixed_pitch = ctxt.read_u32be()?;
let min_mem_type_42 = ctxt.read_u32be()?;
let max_mem_type_42 = ctxt.read_u32be()?;
let min_mem_type_1 = ctxt.read_u32be()?;
let max_mem_type_1 = ctxt.read_u32be()?;
Ok(Header {
version,
italic_angle,
underline_position,
underline_thickness,
is_fixed_pitch,
min_mem_type_42,
max_mem_type_42,
min_mem_type_1,
max_mem_type_1,
})
}
}
impl WriteBinary<&Self> for Header {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &Header) -> Result<(), WriteError> {
I32Be::write(ctxt, table.version)?;
I32Be::write(ctxt, table.italic_angle)?;
I16Be::write(ctxt, table.underline_position)?;
I16Be::write(ctxt, table.underline_thickness)?;
U32Be::write(ctxt, table.is_fixed_pitch)?;
U32Be::write(ctxt, table.min_mem_type_42)?;
U32Be::write(ctxt, table.max_mem_type_42)?;
U32Be::write(ctxt, table.min_mem_type_1)?;
U32Be::write(ctxt, table.max_mem_type_1)?;
Ok(())
}
}
impl<'b> ReadBinary for PostTable<'b> {
type HostType<'a> = PostTable<'a>;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self::HostType<'a>, ParseError> {
let header = ctxt.read::<Header>()?;
let opt_sub_table = match header.version {
0x00020000 => {
// May include some Format 1 glyphs
let num_glyphs = ctxt.read_u16be()?;
let glyph_name_index = ctxt.read_array(usize::from(num_glyphs))?;
// Find the largest index used and use that to determine how many names to read
let names_to_read = glyph_name_index.iter().max().map_or(0, |max| {
(usize::from(max) + 1).saturating_sub(FORMAT_1_NAMES.len())
});
// Read the names
let mut names = Vec::with_capacity(names_to_read);
for _ in 0..names_to_read {
let length = ctxt.read_u8()?;
let bytes = ctxt.read_slice(usize::from(length))?;
names.push(PascalString { bytes });
}
Some(SubTable {
num_glyphs,
glyph_name_index,
names,
})
}
// TODO Handle post version 1.0, 2.5, 3.0
0x00010000 | 0x00025000 | 0x00030000 => None,
_ => return Err(ParseError::BadVersion),
};
Ok(PostTable {
header,
opt_sub_table,
})
}
}
impl<'a> WriteBinary<&Self> for PostTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &PostTable<'a>) -> Result<(), WriteError> {
Header::write(ctxt, &table.header)?;
if let Some(sub_table) = &table.opt_sub_table {
SubTable::write(ctxt, sub_table)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for SubTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &SubTable<'a>) -> Result<(), WriteError> {
U16Be::write(ctxt, table.num_glyphs)?;
<&ReadArray<'_, _>>::write(ctxt, &table.glyph_name_index)?;
for name in &table.names {
PascalString::write(ctxt, name)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for PascalString<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, string: &PascalString<'a>) -> Result<(), WriteError> {
if string.bytes.len() <= usize::from(std::u8::MAX) {
// cast is safe due to check above
U8::write(ctxt, string.bytes.len() as u8)?;
ctxt.write_bytes(string.bytes)?;
Ok(())
} else {
Err(WriteError::BadValue)
}
}
}
impl<'a> PostTable<'a> {
/// Retrieve the glyph name for the supplied `glyph_index`.
///
/// **Note:** Some fonts map more than one glyph to the same name so don't assume names are
/// unique.
pub fn glyph_name(&self, glyph_index: u16) -> Result<Option<&'a str>, ParseError> {
if let Some(sub_table) = &self.opt_sub_table {
if glyph_index >= sub_table.num_glyphs {
return Ok(None);
}
}
match &self.header.version {
0x00010000 if usize::from(glyph_index) < FORMAT_1_NAMES.len() => {
let name = FORMAT_1_NAMES[usize::from(glyph_index)];
Ok(Some(name))
}
0x00020000 => match &self.opt_sub_table {
Some(sub_table) => {
let name_index = sub_table
.glyph_name_index
.get_item(usize::from(glyph_index));
if usize::from(name_index) < FORMAT_1_NAMES.len() {
Ok(Some(FORMAT_1_NAMES[usize::from(name_index)]))
} else {
let index = usize::from(name_index) - FORMAT_1_NAMES.len();
let pascal_string = &sub_table.names[index];
match str::from_utf8(pascal_string.bytes) {
Ok(name) => Ok(Some(name)),
Err(_) => Err(ParseError::BadValue),
}
}
}
// If the table is version 2, the sub-table should exist
None => Err(ParseError::BadValue),
},
_ => Ok(None),
}
}
}
static FORMAT_1_NAMES: &[&str; 258] = &[
".notdef",
".null",
"nonmarkingreturn",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot", | "florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nonbreakingspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Zcaron",
"zcaron",
"brokenbar",
"Eth",
"eth",
"Yacute",
"yacute",
"Thorn",
"thorn",
"minus",
"multiply",
"onesuperior",
"twosuperior",
"threesuperior",
"onehalf",
"onequarter",
"threequarters",
"franc",
"Gbreve",
"gbreve",
"Idotaccent",
"Scedilla",
"scedilla",
"Cacute",
"cacute",
"Ccaron",
"ccaron",
"dcroat",
];
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::read::ReadScope;
use crate::binary::write::WriteBuffer;
#[test]
fn duplicate_glyph_names() {
// Test for post table that maps multiple glyphs to the same name index. Before a fix was
// implemented this table failed to parse.
let post_data = include_bytes!("../tests/fonts/opentype/post.bin");
let post = ReadScope::new(post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
match post.opt_sub_table {
Some(ref sub_table) => assert_eq!(sub_table.names.len(), 1872),
None => panic!("expected post table to have a sub-table"),
}
// These map to the same index (397)
assert_eq!(post.glyph_name(257).unwrap().unwrap(), "Ldot");
assert_eq!(post.glyph_name(1442).unwrap().unwrap(), "Ldot");
}
fn build_post_with_unused_names() -> Result<Vec<u8>, WriteError> {
// Build a post table with unused name entries
let mut w = WriteBuffer::new();
let header = Header {
version: 0x00020000,
italic_angle: 0,
underline_position: 0,
underline_thickness: 0,
is_fixed_pitch: 0,
min_mem_type_42: 0,
max_mem_type_42: 0,
min_mem_type_1: 0,
max_mem_type_1: 0,
};
Header::write(&mut w, &header)?;
let num_glyphs = 10u16;
U16Be::write(&mut w, num_glyphs)?;
// Write name indexes that have unused names between each used entry
U16Be::write(&mut w, 0u16)?; // .notdef
for i in 0..(num_glyphs - 1) {
U16Be::write(&mut w, i * 2 + 258)?;
}
// Write the names
for i in 1..num_glyphs {
// Write a real entry
let name = format!("gid{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
// Then the unused one in between
let name = format!("unused{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
}
Ok(w.into_inner())
}
#[test]
fn unused_glyph_names() {
let post_data = build_post_with_unused_names().expect("unable to build post table");
let post = ReadScope::new(&post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
let num_glyphs = post.opt_sub_table.as_ref().unwrap().num_glyphs;
for i in 0..num_glyphs {
let expected = if i == 0 {
String::from(".notdef")
} else {
format!("gid{}", i)
};
assert_eq!(post.glyph_name(i).unwrap().unwrap(), &expected);
}
}
} | "radical", | random_line_split |
post.rs | //! `post` table parsing and writing.
use std::str;
use crate::binary::read::{ReadArray, ReadBinary, ReadCtxt};
use crate::binary::write::{WriteBinary, WriteContext};
use crate::binary::{I16Be, I32Be, U16Be, U32Be, U8};
use crate::error::{ParseError, WriteError};
pub struct PostTable<'a> {
pub header: Header,
pub opt_sub_table: Option<SubTable<'a>>,
}
pub struct Header {
pub version: i32,
pub italic_angle: i32,
pub underline_position: i16,
pub underline_thickness: i16,
pub is_fixed_pitch: u32,
pub min_mem_type_42: u32,
pub max_mem_type_42: u32,
pub min_mem_type_1: u32,
pub max_mem_type_1: u32,
}
pub struct SubTable<'a> {
pub num_glyphs: u16,
pub glyph_name_index: ReadArray<'a, U16Be>,
pub names: Vec<PascalString<'a>>,
}
#[derive(Clone)]
pub struct PascalString<'a> {
pub bytes: &'a [u8],
}
impl ReadBinary for Header {
type HostType<'b> = Self;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self, ParseError> {
let version = ctxt.read_i32be()?;
let italic_angle = ctxt.read_i32be()?;
let underline_position = ctxt.read_i16be()?;
let underline_thickness = ctxt.read_i16be()?;
let is_fixed_pitch = ctxt.read_u32be()?;
let min_mem_type_42 = ctxt.read_u32be()?;
let max_mem_type_42 = ctxt.read_u32be()?;
let min_mem_type_1 = ctxt.read_u32be()?;
let max_mem_type_1 = ctxt.read_u32be()?;
Ok(Header {
version,
italic_angle,
underline_position,
underline_thickness,
is_fixed_pitch,
min_mem_type_42,
max_mem_type_42,
min_mem_type_1,
max_mem_type_1,
})
}
}
impl WriteBinary<&Self> for Header {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &Header) -> Result<(), WriteError> {
I32Be::write(ctxt, table.version)?;
I32Be::write(ctxt, table.italic_angle)?;
I16Be::write(ctxt, table.underline_position)?;
I16Be::write(ctxt, table.underline_thickness)?;
U32Be::write(ctxt, table.is_fixed_pitch)?;
U32Be::write(ctxt, table.min_mem_type_42)?;
U32Be::write(ctxt, table.max_mem_type_42)?;
U32Be::write(ctxt, table.min_mem_type_1)?;
U32Be::write(ctxt, table.max_mem_type_1)?;
Ok(())
}
}
impl<'b> ReadBinary for PostTable<'b> {
type HostType<'a> = PostTable<'a>;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self::HostType<'a>, ParseError> {
let header = ctxt.read::<Header>()?;
let opt_sub_table = match header.version {
0x00020000 => {
// May include some Format 1 glyphs
let num_glyphs = ctxt.read_u16be()?;
let glyph_name_index = ctxt.read_array(usize::from(num_glyphs))?;
// Find the largest index used and use that to determine how many names to read
let names_to_read = glyph_name_index.iter().max().map_or(0, |max| {
(usize::from(max) + 1).saturating_sub(FORMAT_1_NAMES.len())
});
// Read the names
let mut names = Vec::with_capacity(names_to_read);
for _ in 0..names_to_read {
let length = ctxt.read_u8()?;
let bytes = ctxt.read_slice(usize::from(length))?;
names.push(PascalString { bytes });
}
Some(SubTable {
num_glyphs,
glyph_name_index,
names,
})
}
// TODO Handle post version 1.0, 2.5, 3.0
0x00010000 | 0x00025000 | 0x00030000 => None,
_ => return Err(ParseError::BadVersion),
};
Ok(PostTable {
header,
opt_sub_table,
})
}
}
impl<'a> WriteBinary<&Self> for PostTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &PostTable<'a>) -> Result<(), WriteError> {
Header::write(ctxt, &table.header)?;
if let Some(sub_table) = &table.opt_sub_table {
SubTable::write(ctxt, sub_table)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for SubTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &SubTable<'a>) -> Result<(), WriteError> {
U16Be::write(ctxt, table.num_glyphs)?;
<&ReadArray<'_, _>>::write(ctxt, &table.glyph_name_index)?;
for name in &table.names {
PascalString::write(ctxt, name)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for PascalString<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, string: &PascalString<'a>) -> Result<(), WriteError> {
if string.bytes.len() <= usize::from(std::u8::MAX) {
// cast is safe due to check above
U8::write(ctxt, string.bytes.len() as u8)?;
ctxt.write_bytes(string.bytes)?;
Ok(())
} else {
Err(WriteError::BadValue)
}
}
}
impl<'a> PostTable<'a> {
/// Retrieve the glyph name for the supplied `glyph_index`.
///
/// **Note:** Some fonts map more than one glyph to the same name so don't assume names are
/// unique.
pub fn glyph_name(&self, glyph_index: u16) -> Result<Option<&'a str>, ParseError> {
if let Some(sub_table) = &self.opt_sub_table {
if glyph_index >= sub_table.num_glyphs {
return Ok(None);
}
}
match &self.header.version {
0x00010000 if usize::from(glyph_index) < FORMAT_1_NAMES.len() => {
let name = FORMAT_1_NAMES[usize::from(glyph_index)];
Ok(Some(name))
}
0x00020000 => match &self.opt_sub_table {
Some(sub_table) => {
let name_index = sub_table
.glyph_name_index
.get_item(usize::from(glyph_index));
if usize::from(name_index) < FORMAT_1_NAMES.len() {
Ok(Some(FORMAT_1_NAMES[usize::from(name_index)]))
} else {
let index = usize::from(name_index) - FORMAT_1_NAMES.len();
let pascal_string = &sub_table.names[index];
match str::from_utf8(pascal_string.bytes) {
Ok(name) => Ok(Some(name)),
Err(_) => Err(ParseError::BadValue),
}
}
}
// If the table is version 2, the sub-table should exist
None => Err(ParseError::BadValue),
},
_ => Ok(None),
}
}
}
static FORMAT_1_NAMES: &[&str; 258] = &[
".notdef",
".null",
"nonmarkingreturn",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nonbreakingspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Zcaron",
"zcaron",
"brokenbar",
"Eth",
"eth",
"Yacute",
"yacute",
"Thorn",
"thorn",
"minus",
"multiply",
"onesuperior",
"twosuperior",
"threesuperior",
"onehalf",
"onequarter",
"threequarters",
"franc",
"Gbreve",
"gbreve",
"Idotaccent",
"Scedilla",
"scedilla",
"Cacute",
"cacute",
"Ccaron",
"ccaron",
"dcroat",
];
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::read::ReadScope;
use crate::binary::write::WriteBuffer;
#[test]
fn duplicate_glyph_names() {
// Test for post table that maps multiple glyphs to the same name index. Before a fix was
// implemented this table failed to parse.
let post_data = include_bytes!("../tests/fonts/opentype/post.bin");
let post = ReadScope::new(post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
match post.opt_sub_table {
Some(ref sub_table) => assert_eq!(sub_table.names.len(), 1872),
None => panic!("expected post table to have a sub-table"),
}
// These map to the same index (397)
assert_eq!(post.glyph_name(257).unwrap().unwrap(), "Ldot");
assert_eq!(post.glyph_name(1442).unwrap().unwrap(), "Ldot");
}
fn build_post_with_unused_names() -> Result<Vec<u8>, WriteError> |
#[test]
fn unused_glyph_names() {
let post_data = build_post_with_unused_names().expect("unable to build post table");
let post = ReadScope::new(&post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
let num_glyphs = post.opt_sub_table.as_ref().unwrap().num_glyphs;
for i in 0..num_glyphs {
let expected = if i == 0 {
String::from(".notdef")
} else {
format!("gid{}", i)
};
assert_eq!(post.glyph_name(i).unwrap().unwrap(), &expected);
}
}
}
| {
// Build a post table with unused name entries
let mut w = WriteBuffer::new();
let header = Header {
version: 0x00020000,
italic_angle: 0,
underline_position: 0,
underline_thickness: 0,
is_fixed_pitch: 0,
min_mem_type_42: 0,
max_mem_type_42: 0,
min_mem_type_1: 0,
max_mem_type_1: 0,
};
Header::write(&mut w, &header)?;
let num_glyphs = 10u16;
U16Be::write(&mut w, num_glyphs)?;
// Write name indexes that have unused names between each used entry
U16Be::write(&mut w, 0u16)?; // .notdef
for i in 0..(num_glyphs - 1) {
U16Be::write(&mut w, i * 2 + 258)?;
}
// Write the names
for i in 1..num_glyphs {
// Write a real entry
let name = format!("gid{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
// Then the unused one in between
let name = format!("unused{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
}
Ok(w.into_inner())
} | identifier_body |
post.rs | //! `post` table parsing and writing.
use std::str;
use crate::binary::read::{ReadArray, ReadBinary, ReadCtxt};
use crate::binary::write::{WriteBinary, WriteContext};
use crate::binary::{I16Be, I32Be, U16Be, U32Be, U8};
use crate::error::{ParseError, WriteError};
pub struct PostTable<'a> {
pub header: Header,
pub opt_sub_table: Option<SubTable<'a>>,
}
pub struct Header {
pub version: i32,
pub italic_angle: i32,
pub underline_position: i16,
pub underline_thickness: i16,
pub is_fixed_pitch: u32,
pub min_mem_type_42: u32,
pub max_mem_type_42: u32,
pub min_mem_type_1: u32,
pub max_mem_type_1: u32,
}
pub struct SubTable<'a> {
pub num_glyphs: u16,
pub glyph_name_index: ReadArray<'a, U16Be>,
pub names: Vec<PascalString<'a>>,
}
#[derive(Clone)]
pub struct PascalString<'a> {
pub bytes: &'a [u8],
}
impl ReadBinary for Header {
type HostType<'b> = Self;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self, ParseError> {
let version = ctxt.read_i32be()?;
let italic_angle = ctxt.read_i32be()?;
let underline_position = ctxt.read_i16be()?;
let underline_thickness = ctxt.read_i16be()?;
let is_fixed_pitch = ctxt.read_u32be()?;
let min_mem_type_42 = ctxt.read_u32be()?;
let max_mem_type_42 = ctxt.read_u32be()?;
let min_mem_type_1 = ctxt.read_u32be()?;
let max_mem_type_1 = ctxt.read_u32be()?;
Ok(Header {
version,
italic_angle,
underline_position,
underline_thickness,
is_fixed_pitch,
min_mem_type_42,
max_mem_type_42,
min_mem_type_1,
max_mem_type_1,
})
}
}
impl WriteBinary<&Self> for Header {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &Header) -> Result<(), WriteError> {
I32Be::write(ctxt, table.version)?;
I32Be::write(ctxt, table.italic_angle)?;
I16Be::write(ctxt, table.underline_position)?;
I16Be::write(ctxt, table.underline_thickness)?;
U32Be::write(ctxt, table.is_fixed_pitch)?;
U32Be::write(ctxt, table.min_mem_type_42)?;
U32Be::write(ctxt, table.max_mem_type_42)?;
U32Be::write(ctxt, table.min_mem_type_1)?;
U32Be::write(ctxt, table.max_mem_type_1)?;
Ok(())
}
}
impl<'b> ReadBinary for PostTable<'b> {
type HostType<'a> = PostTable<'a>;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self::HostType<'a>, ParseError> {
let header = ctxt.read::<Header>()?;
let opt_sub_table = match header.version {
0x00020000 => {
// May include some Format 1 glyphs
let num_glyphs = ctxt.read_u16be()?;
let glyph_name_index = ctxt.read_array(usize::from(num_glyphs))?;
// Find the largest index used and use that to determine how many names to read
let names_to_read = glyph_name_index.iter().max().map_or(0, |max| {
(usize::from(max) + 1).saturating_sub(FORMAT_1_NAMES.len())
});
// Read the names
let mut names = Vec::with_capacity(names_to_read);
for _ in 0..names_to_read {
let length = ctxt.read_u8()?;
let bytes = ctxt.read_slice(usize::from(length))?;
names.push(PascalString { bytes });
}
Some(SubTable {
num_glyphs,
glyph_name_index,
names,
})
}
// TODO Handle post version 1.0, 2.5, 3.0
0x00010000 | 0x00025000 | 0x00030000 => None,
_ => return Err(ParseError::BadVersion),
};
Ok(PostTable {
header,
opt_sub_table,
})
}
}
impl<'a> WriteBinary<&Self> for PostTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &PostTable<'a>) -> Result<(), WriteError> {
Header::write(ctxt, &table.header)?;
if let Some(sub_table) = &table.opt_sub_table {
SubTable::write(ctxt, sub_table)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for SubTable<'a> {
type Output = ();
fn | <C: WriteContext>(ctxt: &mut C, table: &SubTable<'a>) -> Result<(), WriteError> {
U16Be::write(ctxt, table.num_glyphs)?;
<&ReadArray<'_, _>>::write(ctxt, &table.glyph_name_index)?;
for name in &table.names {
PascalString::write(ctxt, name)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for PascalString<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, string: &PascalString<'a>) -> Result<(), WriteError> {
if string.bytes.len() <= usize::from(std::u8::MAX) {
// cast is safe due to check above
U8::write(ctxt, string.bytes.len() as u8)?;
ctxt.write_bytes(string.bytes)?;
Ok(())
} else {
Err(WriteError::BadValue)
}
}
}
impl<'a> PostTable<'a> {
/// Retrieve the glyph name for the supplied `glyph_index`.
///
/// **Note:** Some fonts map more than one glyph to the same name so don't assume names are
/// unique.
pub fn glyph_name(&self, glyph_index: u16) -> Result<Option<&'a str>, ParseError> {
if let Some(sub_table) = &self.opt_sub_table {
if glyph_index >= sub_table.num_glyphs {
return Ok(None);
}
}
match &self.header.version {
0x00010000 if usize::from(glyph_index) < FORMAT_1_NAMES.len() => {
let name = FORMAT_1_NAMES[usize::from(glyph_index)];
Ok(Some(name))
}
0x00020000 => match &self.opt_sub_table {
Some(sub_table) => {
let name_index = sub_table
.glyph_name_index
.get_item(usize::from(glyph_index));
if usize::from(name_index) < FORMAT_1_NAMES.len() {
Ok(Some(FORMAT_1_NAMES[usize::from(name_index)]))
} else {
let index = usize::from(name_index) - FORMAT_1_NAMES.len();
let pascal_string = &sub_table.names[index];
match str::from_utf8(pascal_string.bytes) {
Ok(name) => Ok(Some(name)),
Err(_) => Err(ParseError::BadValue),
}
}
}
// If the table is version 2, the sub-table should exist
None => Err(ParseError::BadValue),
},
_ => Ok(None),
}
}
}
static FORMAT_1_NAMES: &[&str; 258] = &[
".notdef",
".null",
"nonmarkingreturn",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nonbreakingspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Zcaron",
"zcaron",
"brokenbar",
"Eth",
"eth",
"Yacute",
"yacute",
"Thorn",
"thorn",
"minus",
"multiply",
"onesuperior",
"twosuperior",
"threesuperior",
"onehalf",
"onequarter",
"threequarters",
"franc",
"Gbreve",
"gbreve",
"Idotaccent",
"Scedilla",
"scedilla",
"Cacute",
"cacute",
"Ccaron",
"ccaron",
"dcroat",
];
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::read::ReadScope;
use crate::binary::write::WriteBuffer;
#[test]
fn duplicate_glyph_names() {
// Test for post table that maps multiple glyphs to the same name index. Before a fix was
// implemented this table failed to parse.
let post_data = include_bytes!("../tests/fonts/opentype/post.bin");
let post = ReadScope::new(post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
match post.opt_sub_table {
Some(ref sub_table) => assert_eq!(sub_table.names.len(), 1872),
None => panic!("expected post table to have a sub-table"),
}
// These map to the same index (397)
assert_eq!(post.glyph_name(257).unwrap().unwrap(), "Ldot");
assert_eq!(post.glyph_name(1442).unwrap().unwrap(), "Ldot");
}
fn build_post_with_unused_names() -> Result<Vec<u8>, WriteError> {
// Build a post table with unused name entries
let mut w = WriteBuffer::new();
let header = Header {
version: 0x00020000,
italic_angle: 0,
underline_position: 0,
underline_thickness: 0,
is_fixed_pitch: 0,
min_mem_type_42: 0,
max_mem_type_42: 0,
min_mem_type_1: 0,
max_mem_type_1: 0,
};
Header::write(&mut w, &header)?;
let num_glyphs = 10u16;
U16Be::write(&mut w, num_glyphs)?;
// Write name indexes that have unused names between each used entry
U16Be::write(&mut w, 0u16)?; // .notdef
for i in 0..(num_glyphs - 1) {
U16Be::write(&mut w, i * 2 + 258)?;
}
// Write the names
for i in 1..num_glyphs {
// Write a real entry
let name = format!("gid{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
// Then the unused one in between
let name = format!("unused{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
}
Ok(w.into_inner())
}
#[test]
fn unused_glyph_names() {
let post_data = build_post_with_unused_names().expect("unable to build post table");
let post = ReadScope::new(&post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
let num_glyphs = post.opt_sub_table.as_ref().unwrap().num_glyphs;
for i in 0..num_glyphs {
let expected = if i == 0 {
String::from(".notdef")
} else {
format!("gid{}", i)
};
assert_eq!(post.glyph_name(i).unwrap().unwrap(), &expected);
}
}
}
| write | identifier_name |
viewer.js | "use strict";
var Widget = require('./widget').Widget,
util = require('../util');
var $ = util.$,
_t = util.gettext;
var NS = 'annotator-viewer';
// Private: simple parser for hypermedia link structure
//
// Examples:
//
// links = [
// {
// rel: 'alternate',
// href: 'http://example.com/pages/14.json',
// type: 'application/json'
// },
// {
// rel: 'prev':
// href: 'http://example.com/pages/13'
// }
// ]
//
// parseLinks(links, 'alternate')
// # => [{rel: 'alternate', href: 'http://...', ... }]
// parseLinks(links, 'alternate', {type: 'text/html'})
// # => []
//
function parseLinks(data, rel, cond) |
// Public: Creates an element for viewing annotations.
var Viewer = exports.Viewer = Widget.extend({
// Public: Creates an instance of the Viewer object.
//
// options - An Object containing options.
//
// Examples
//
// # Creates a new viewer, adds a custom field and displays an annotation.
// viewer = new Viewer()
// viewer.addField({
// load: someLoadCallback
// })
// viewer.load(annotation)
//
// Returns a new Viewer instance.
constructor: function (options) {
Widget.call(this, options);
this.itemTemplate = Viewer.itemTemplate;
this.fields = [];
this.annotations = [];
this.hideTimer = null;
this.hideTimerDfd = null;
this.hideTimerActivity = null;
this.mouseDown = false;
this.render = function (annotation) {
if (annotation.text) {
return util.escapeHtml(annotation.text);
} else {
return "<i>" + _t('No comment') + "</i>";
}
};
var self = this;
if (this.options.defaultFields) {
this.addField({
load: function (field, annotation) {
$(field).html(self.render(annotation));
}
});
}
if (typeof this.options.onEdit !== 'function') {
throw new TypeError("onEdit callback must be a function");
}
if (typeof this.options.onDelete !== 'function') {
throw new TypeError("onDelete callback must be a function");
}
if (typeof this.options.permitEdit !== 'function') {
throw new TypeError("permitEdit callback must be a function");
}
if (typeof this.options.permitDelete !== 'function') {
throw new TypeError("permitDelete callback must be a function");
}
if (this.options.autoViewHighlights) {
this.document = this.options.autoViewHighlights.ownerDocument;
$(this.options.autoViewHighlights)
.on("mouseover." + NS, '.annotator-hl', function (event) {
// If there are many overlapping highlights, still only
// call _onHighlightMouseover once.
if (event.target === this) {
self._onHighlightMouseover(event);
}
})
.on("mouseleave." + NS, '.annotator-hl', function () {
self._startHideTimer();
});
$(this.document.body)
.on("mousedown." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = true;
}
})
.on("mouseup." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = false;
}
});
}
this.element
.on("click." + NS, '.annotator-edit', function (e) {
self._onEditClick(e);
})
.on("click." + NS, '.annotator-delete', function (e) {
self._onDeleteClick(e);
})
.on("mouseenter." + NS, function () {
self._clearHideTimer();
})
.on("mouseleave." + NS, function () {
self._startHideTimer();
});
},
destroy: function () {
if (this.options.autoViewHighlights) {
$(this.options.autoViewHighlights).off("." + NS);
$(this.document.body).off("." + NS);
}
this.element.off("." + NS);
Widget.prototype.destroy.call(this);
},
// Public: Show the viewer.
//
// position - An Object specifying the position in which to show the editor
// (optional).
//
// Examples
//
// viewer.show()
// viewer.hide()
// viewer.show({top: '100px', left: '80px'})
//
// Returns nothing.
show: function (position) {
if (typeof position !== 'undefined' && position !== null) {
this.element.css({
top: position.top,
left: position.left
});
}
var controls = this.element
.find('.annotator-controls')
.addClass(this.classes.showControls);
var self = this;
setTimeout(function () {
controls.removeClass(self.classes.showControls);
}, 500);
Widget.prototype.show.call(this);
},
// Public: Load annotations into the viewer and show it.
//
// annotation - An Array of annotations.
//
// Examples
//
// viewer.load([annotation1, annotation2, annotation3])
//
// Returns nothing.
load: function (annotations, position) {
this.annotations = annotations || [];
var list = this.element.find('ul:first').empty();
for (var i = 0, len = this.annotations.length; i < len; i++) {
var annotation = this.annotations[i];
this._annotationItem(annotation)
.appendTo(list)
.data('annotation', annotation);
}
this.show(position);
},
// Public: Set the annotation renderer.
//
// renderer - A function that accepts an annotation and returns HTML.
//
// Returns nothing.
setRenderer: function (renderer) {
this.render = renderer;
},
// Private: create the list item for a single annotation
_annotationItem: function (annotation) {
var item = $(this.itemTemplate).clone();
var controls = item.find('.annotator-controls'),
link = controls.find('.annotator-link'),
edit = controls.find('.annotator-edit'),
del = controls.find('.annotator-delete');
var links = parseLinks(
annotation.links || [],
'alternate',
{'type': 'text/html'}
);
var hasValidLink = (links.length > 0 &&
typeof links[0].href !== 'undefined' &&
links[0].href !== null);
if (hasValidLink) {
link.attr('href', links[0].href);
} else {
link.remove();
}
var controller = {};
if (this.options.permitEdit(annotation)) {
controller.showEdit = function () {
edit.removeAttr('disabled');
};
controller.hideEdit = function () {
edit.attr('disabled', 'disabled');
};
} else {
edit.remove();
}
if (this.options.permitDelete(annotation)) {
controller.showDelete = function () {
del.removeAttr('disabled');
};
controller.hideDelete = function () {
del.attr('disabled', 'disabled');
};
} else {
del.remove();
}
for (var i = 0, len = this.fields.length; i < len; i++) {
var field = this.fields[i];
var element = $(field.element).clone().appendTo(item)[0];
field.load(element, annotation, controller);
}
return item;
},
// Public: Adds an addional field to an annotation view. A callback can be
// provided to update the view on load.
//
// options - An options Object. Options are as follows:
// load - Callback Function called when the view is loaded with an
// annotation. Recieves a newly created clone of an item
// and the annotation to be displayed (it will be called
// once for each annotation being loaded).
//
// Examples
//
// # Display a user name.
// viewer.addField({
// # This is called when the viewer is loaded.
// load: (field, annotation) ->
// field = $(field)
//
// if annotation.user
// field.text(annotation.user) # Display the user
// else
// field.remove() # Do not display the field.
// })
//
// Returns itself.
addField: function (options) {
var field = $.extend({
load: function () {}
}, options);
field.element = $('<div />')[0];
this.fields.push(field);
return this;
},
// Event callback: called when the edit button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onEditClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onEdit(item);
},
// Event callback: called when the delete button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onDeleteClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onDelete(item);
},
// Event callback: called when a user triggers `mouseover` on a highlight
// element.
//
// event - An Event object.
//
// Returns nothing.
_onHighlightMouseover: function (event) {
// If the mouse button is currently depressed, we're probably trying to
// make a selection, so we shouldn't show the viewer.
if (this.mouseDown) {
return;
}
var self = this;
this._startHideTimer(true)
.done(function () {
var annotations = $(event.target)
.parents('.annotator-hl')
.addBack()
.map(function (_, elem) {
return $(elem).data("annotation");
})
.toArray();
// Now show the viewer with the wanted annotations
self.load(annotations, util.mousePosition(event));
});
},
// Starts the hide timer. This returns a promise that is resolved when the
// viewer has been hidden. If the viewer is already hidden, the promise will
// be resolved instantly.
//
// activity - A boolean indicating whether the need to hide is due to a user
// actively indicating a desire to view another annotation (as
// opposed to merely mousing off the current one). Default: false
//
// Returns a Promise.
_startHideTimer: function (activity) {
if (typeof activity === 'undefined' || activity === null) {
activity = false;
}
// If timer has already been set, use that one.
if (this.hideTimer) {
if (activity === false || this.hideTimerActivity === activity) {
return this.hideTimerDfd;
} else {
// The pending timeout is an inactivity timeout, so likely to be
// too slow. Clear the pending timeout and start a new (shorter)
// one!
this._clearHideTimer();
}
}
var timeout;
if (activity) {
timeout = this.options.activityDelay;
} else {
timeout = this.options.inactivityDelay;
}
this.hideTimerDfd = $.Deferred();
if (!this.isShown()) {
this.hideTimer = null;
this.hideTimerDfd.resolve();
this.hideTimerActivity = null;
} else {
var self = this;
this.hideTimer = setTimeout(function () {
self.hide();
self.hideTimerDfd.resolve();
self.hideTimer = null;
}, timeout);
this.hideTimerActivity = Boolean(activity);
}
return this.hideTimerDfd.promise();
},
// Clears the hide timer. Also rejects any promise returned by a previous
// call to _startHideTimer.
//
// Returns nothing.
_clearHideTimer: function () {
clearTimeout(this.hideTimer);
this.hideTimer = null;
this.hideTimerDfd.reject();
this.hideTimerActivity = null;
}
});
// Classes for toggling annotator state.
Viewer.classes = {
showControls: 'annotator-visible'
};
// HTML templates for this.widget and this.item properties.
Viewer.template = [
'<div class="annotator-outer annotator-viewer annotator-hide">',
' <ul class="annotator-widget annotator-listing"></ul>',
'</div>'
].join('\n');
Viewer.itemTemplate = [
'<li class="annotator-annotation annotator-item">',
' <span class="annotator-controls">',
' <a href="#"',
' title="' + _t('View as webpage') + '"',
' class="annotator-link">' + _t('View as webpage') + '</a>',
' <button type="button"',
' title="' + _t('Edit') + '"',
' class="annotator-edit">' + _t('Edit') + '</button>',
' <button type="button"',
' title="' + _t('Delete') + '"',
' class="annotator-delete">' + _t('Delete') + '</button>',
' </span>',
'</li>'
].join('\n');
// Configuration options
Viewer.options = {
// Add the default field(s) to the viewer.
defaultFields: true,
// Time, in milliseconds, before the viewer is hidden when a user mouses off
// the viewer.
inactivityDelay: 500,
// Time, in milliseconds, before the viewer is updated when a user mouses
// over another annotation.
activityDelay: 100,
// Hook, passed an annotation, which determines if the viewer's "edit"
// button is shown. If it is not a function, the button will not be shown.
permitEdit: function () { return false; },
// Hook, passed an annotation, which determines if the viewer's "delete"
// button is shown. If it is not a function, the button will not be shown.
permitDelete: function () { return false; },
// If set to a DOM Element, will set up the viewer to automatically display
// when the user hovers over Annotator highlights within that element.
autoViewHighlights: null,
// Callback, called when the user clicks the edit button for an annotation.
onEdit: function () {},
// Callback, called when the user clicks the delete button for an
// annotation.
onDelete: function () {}
};
// standalone is a module that uses the Viewer to display an viewer widget in
// response to some viewer action (such as mousing over an annotator highlight
// element).
exports.standalone = function standalone(options) {
var widget;
if (typeof options === 'undefined' || options === null) {
options = {};
}
return {
start: function (app) {
var ident = app.registry.getUtility('identityPolicy');
var authz = app.registry.getUtility('authorizationPolicy');
// Set default handlers for what happens when the user clicks the
// edit and delete buttons:
if (typeof options.onEdit === 'undefined') {
options.onEdit = function (annotation) {
app.annotations.update(annotation);
};
}
if (typeof options.onDelete === 'undefined') {
options.onDelete = function (annotation) {
app.annotations['delete'](annotation);
};
}
// Set default handlers that determine whether the edit and delete
// buttons are shown in the viewer:
if (typeof options.permitEdit === 'undefined') {
options.permitEdit = function (annotation) {
return authz.permits('update', annotation, ident.who());
};
}
if (typeof options.permitDelete === 'undefined') {
options.permitDelete = function (annotation) {
return authz.permits('delete', annotation, ident.who());
};
}
widget = new exports.Viewer(options);
},
destroy: function () { widget.destroy(); }
};
};
| {
cond = $.extend({}, cond, {rel: rel});
var results = [];
for (var i = 0, len = data.length; i < len; i++) {
var d = data[i],
match = true;
for (var k in cond) {
if (cond.hasOwnProperty(k) && d[k] !== cond[k]) {
match = false;
break;
}
}
if (match) {
results.push(d);
}
}
return results;
} | identifier_body |
viewer.js | "use strict";
var Widget = require('./widget').Widget,
util = require('../util');
var $ = util.$,
_t = util.gettext;
var NS = 'annotator-viewer';
// Private: simple parser for hypermedia link structure
//
// Examples:
//
// links = [
// {
// rel: 'alternate',
// href: 'http://example.com/pages/14.json',
// type: 'application/json'
// },
// {
// rel: 'prev':
// href: 'http://example.com/pages/13'
// }
// ]
//
// parseLinks(links, 'alternate')
// # => [{rel: 'alternate', href: 'http://...', ... }]
// parseLinks(links, 'alternate', {type: 'text/html'})
// # => []
//
function parseLinks(data, rel, cond) {
cond = $.extend({}, cond, {rel: rel});
var results = [];
for (var i = 0, len = data.length; i < len; i++) {
var d = data[i],
match = true;
for (var k in cond) {
if (cond.hasOwnProperty(k) && d[k] !== cond[k]) {
match = false;
break;
}
}
if (match) {
results.push(d);
}
}
return results;
}
// Public: Creates an element for viewing annotations.
var Viewer = exports.Viewer = Widget.extend({
// Public: Creates an instance of the Viewer object.
//
// options - An Object containing options.
//
// Examples
//
// # Creates a new viewer, adds a custom field and displays an annotation.
// viewer = new Viewer()
// viewer.addField({
// load: someLoadCallback
// })
// viewer.load(annotation)
//
// Returns a new Viewer instance.
constructor: function (options) {
Widget.call(this, options);
this.itemTemplate = Viewer.itemTemplate;
this.fields = [];
this.annotations = [];
this.hideTimer = null;
this.hideTimerDfd = null;
this.hideTimerActivity = null;
this.mouseDown = false;
this.render = function (annotation) {
if (annotation.text) {
return util.escapeHtml(annotation.text);
} else {
return "<i>" + _t('No comment') + "</i>";
}
};
var self = this;
if (this.options.defaultFields) {
this.addField({
load: function (field, annotation) {
$(field).html(self.render(annotation));
}
});
}
if (typeof this.options.onEdit !== 'function') {
throw new TypeError("onEdit callback must be a function");
}
if (typeof this.options.onDelete !== 'function') {
throw new TypeError("onDelete callback must be a function");
}
if (typeof this.options.permitEdit !== 'function') {
throw new TypeError("permitEdit callback must be a function");
}
if (typeof this.options.permitDelete !== 'function') {
throw new TypeError("permitDelete callback must be a function");
}
if (this.options.autoViewHighlights) {
this.document = this.options.autoViewHighlights.ownerDocument;
$(this.options.autoViewHighlights)
.on("mouseover." + NS, '.annotator-hl', function (event) {
// If there are many overlapping highlights, still only
// call _onHighlightMouseover once.
if (event.target === this) {
self._onHighlightMouseover(event);
}
})
.on("mouseleave." + NS, '.annotator-hl', function () {
self._startHideTimer();
});
$(this.document.body)
.on("mousedown." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = true;
}
})
.on("mouseup." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = false;
}
});
}
this.element
.on("click." + NS, '.annotator-edit', function (e) {
self._onEditClick(e);
})
.on("click." + NS, '.annotator-delete', function (e) {
self._onDeleteClick(e);
})
.on("mouseenter." + NS, function () {
self._clearHideTimer();
})
.on("mouseleave." + NS, function () {
self._startHideTimer();
});
},
destroy: function () {
if (this.options.autoViewHighlights) {
$(this.options.autoViewHighlights).off("." + NS);
$(this.document.body).off("." + NS);
}
this.element.off("." + NS);
Widget.prototype.destroy.call(this);
},
// Public: Show the viewer.
//
// position - An Object specifying the position in which to show the editor
// (optional).
//
// Examples
//
// viewer.show()
// viewer.hide()
// viewer.show({top: '100px', left: '80px'})
//
// Returns nothing.
show: function (position) {
if (typeof position !== 'undefined' && position !== null) {
this.element.css({
top: position.top,
left: position.left
});
}
var controls = this.element
.find('.annotator-controls')
.addClass(this.classes.showControls);
var self = this;
setTimeout(function () {
controls.removeClass(self.classes.showControls);
}, 500);
Widget.prototype.show.call(this);
},
// Public: Load annotations into the viewer and show it.
//
// annotation - An Array of annotations.
//
// Examples
//
// viewer.load([annotation1, annotation2, annotation3])
//
// Returns nothing.
load: function (annotations, position) {
this.annotations = annotations || [];
var list = this.element.find('ul:first').empty();
for (var i = 0, len = this.annotations.length; i < len; i++) {
var annotation = this.annotations[i];
this._annotationItem(annotation)
.appendTo(list)
.data('annotation', annotation);
}
this.show(position);
},
// Public: Set the annotation renderer.
//
// renderer - A function that accepts an annotation and returns HTML.
//
// Returns nothing.
setRenderer: function (renderer) {
this.render = renderer;
},
// Private: create the list item for a single annotation
_annotationItem: function (annotation) {
var item = $(this.itemTemplate).clone();
var controls = item.find('.annotator-controls'),
link = controls.find('.annotator-link'),
edit = controls.find('.annotator-edit'),
del = controls.find('.annotator-delete');
var links = parseLinks(
annotation.links || [],
'alternate',
{'type': 'text/html'}
);
var hasValidLink = (links.length > 0 &&
typeof links[0].href !== 'undefined' &&
links[0].href !== null);
if (hasValidLink) {
link.attr('href', links[0].href);
} else {
link.remove();
}
var controller = {};
if (this.options.permitEdit(annotation)) {
controller.showEdit = function () {
edit.removeAttr('disabled');
};
controller.hideEdit = function () {
edit.attr('disabled', 'disabled');
};
} else {
edit.remove();
}
if (this.options.permitDelete(annotation)) {
controller.showDelete = function () {
del.removeAttr('disabled');
};
controller.hideDelete = function () {
del.attr('disabled', 'disabled');
};
} else {
del.remove();
}
for (var i = 0, len = this.fields.length; i < len; i++) {
var field = this.fields[i];
var element = $(field.element).clone().appendTo(item)[0];
field.load(element, annotation, controller);
}
return item;
},
// Public: Adds an addional field to an annotation view. A callback can be
// provided to update the view on load.
//
// options - An options Object. Options are as follows:
// load - Callback Function called when the view is loaded with an
// annotation. Recieves a newly created clone of an item
// and the annotation to be displayed (it will be called
// once for each annotation being loaded).
//
// Examples
//
// # Display a user name.
// viewer.addField({
// # This is called when the viewer is loaded.
// load: (field, annotation) ->
// field = $(field)
//
// if annotation.user
// field.text(annotation.user) # Display the user
// else
// field.remove() # Do not display the field.
// })
//
// Returns itself.
addField: function (options) {
var field = $.extend({
load: function () {}
}, options);
field.element = $('<div />')[0];
this.fields.push(field);
return this;
},
// Event callback: called when the edit button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onEditClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onEdit(item);
},
// Event callback: called when the delete button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onDeleteClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onDelete(item);
},
// Event callback: called when a user triggers `mouseover` on a highlight
// element.
//
// event - An Event object.
//
// Returns nothing.
_onHighlightMouseover: function (event) {
// If the mouse button is currently depressed, we're probably trying to
// make a selection, so we shouldn't show the viewer.
if (this.mouseDown) {
return;
}
var self = this;
this._startHideTimer(true)
.done(function () {
var annotations = $(event.target)
.parents('.annotator-hl')
.addBack()
.map(function (_, elem) {
return $(elem).data("annotation");
})
.toArray();
// Now show the viewer with the wanted annotations
self.load(annotations, util.mousePosition(event));
});
},
// Starts the hide timer. This returns a promise that is resolved when the
// viewer has been hidden. If the viewer is already hidden, the promise will
// be resolved instantly.
//
// activity - A boolean indicating whether the need to hide is due to a user
// actively indicating a desire to view another annotation (as
// opposed to merely mousing off the current one). Default: false
//
// Returns a Promise.
_startHideTimer: function (activity) {
if (typeof activity === 'undefined' || activity === null) {
activity = false;
}
// If timer has already been set, use that one.
if (this.hideTimer) {
if (activity === false || this.hideTimerActivity === activity) {
return this.hideTimerDfd;
} else {
// The pending timeout is an inactivity timeout, so likely to be
// too slow. Clear the pending timeout and start a new (shorter)
// one!
this._clearHideTimer();
}
}
var timeout;
if (activity) {
timeout = this.options.activityDelay;
} else {
timeout = this.options.inactivityDelay;
}
this.hideTimerDfd = $.Deferred();
if (!this.isShown()) {
this.hideTimer = null;
this.hideTimerDfd.resolve();
this.hideTimerActivity = null;
} else {
var self = this;
this.hideTimer = setTimeout(function () {
self.hide();
self.hideTimerDfd.resolve();
self.hideTimer = null;
}, timeout);
this.hideTimerActivity = Boolean(activity);
}
return this.hideTimerDfd.promise();
},
// Clears the hide timer. Also rejects any promise returned by a previous
// call to _startHideTimer.
//
// Returns nothing.
_clearHideTimer: function () {
clearTimeout(this.hideTimer);
this.hideTimer = null;
this.hideTimerDfd.reject();
this.hideTimerActivity = null;
}
});
// Classes for toggling annotator state.
Viewer.classes = {
showControls: 'annotator-visible'
};
// HTML templates for this.widget and this.item properties.
Viewer.template = [
'<div class="annotator-outer annotator-viewer annotator-hide">',
' <ul class="annotator-widget annotator-listing"></ul>',
'</div>'
].join('\n');
Viewer.itemTemplate = [
'<li class="annotator-annotation annotator-item">',
' <span class="annotator-controls">',
' <a href="#"',
' title="' + _t('View as webpage') + '"',
' class="annotator-link">' + _t('View as webpage') + '</a>',
' <button type="button"',
' title="' + _t('Edit') + '"',
' class="annotator-edit">' + _t('Edit') + '</button>',
' <button type="button"',
' title="' + _t('Delete') + '"',
' class="annotator-delete">' + _t('Delete') + '</button>',
' </span>',
'</li>'
].join('\n');
// Configuration options
Viewer.options = {
// Add the default field(s) to the viewer.
defaultFields: true,
// Time, in milliseconds, before the viewer is hidden when a user mouses off
// the viewer.
inactivityDelay: 500,
// Time, in milliseconds, before the viewer is updated when a user mouses
// over another annotation.
activityDelay: 100,
// Hook, passed an annotation, which determines if the viewer's "edit"
// button is shown. If it is not a function, the button will not be shown.
permitEdit: function () { return false; },
// Hook, passed an annotation, which determines if the viewer's "delete"
// button is shown. If it is not a function, the button will not be shown.
permitDelete: function () { return false; },
// If set to a DOM Element, will set up the viewer to automatically display
// when the user hovers over Annotator highlights within that element.
autoViewHighlights: null,
// Callback, called when the user clicks the edit button for an annotation.
onEdit: function () {},
// Callback, called when the user clicks the delete button for an
// annotation.
onDelete: function () {}
};
// standalone is a module that uses the Viewer to display an viewer widget in
// response to some viewer action (such as mousing over an annotator highlight
// element).
exports.standalone = function standalone(options) {
var widget;
if (typeof options === 'undefined' || options === null) {
options = {};
}
return {
start: function (app) {
var ident = app.registry.getUtility('identityPolicy');
var authz = app.registry.getUtility('authorizationPolicy');
// Set default handlers for what happens when the user clicks the
// edit and delete buttons:
if (typeof options.onEdit === 'undefined') {
options.onEdit = function (annotation) {
app.annotations.update(annotation);
};
}
if (typeof options.onDelete === 'undefined') {
options.onDelete = function (annotation) {
app.annotations['delete'](annotation);
};
}
// Set default handlers that determine whether the edit and delete
// buttons are shown in the viewer:
if (typeof options.permitEdit === 'undefined') |
if (typeof options.permitDelete === 'undefined') {
options.permitDelete = function (annotation) {
return authz.permits('delete', annotation, ident.who());
};
}
widget = new exports.Viewer(options);
},
destroy: function () { widget.destroy(); }
};
};
| {
options.permitEdit = function (annotation) {
return authz.permits('update', annotation, ident.who());
};
} | conditional_block |
viewer.js | "use strict";
var Widget = require('./widget').Widget,
util = require('../util');
var $ = util.$,
_t = util.gettext;
var NS = 'annotator-viewer';
// Private: simple parser for hypermedia link structure
//
// Examples:
//
// links = [
// {
// rel: 'alternate',
// href: 'http://example.com/pages/14.json',
// type: 'application/json'
// },
// {
// rel: 'prev':
// href: 'http://example.com/pages/13'
// }
// ]
//
// parseLinks(links, 'alternate')
// # => [{rel: 'alternate', href: 'http://...', ... }]
// parseLinks(links, 'alternate', {type: 'text/html'})
// # => []
//
function parseLinks(data, rel, cond) {
cond = $.extend({}, cond, {rel: rel});
var results = [];
for (var i = 0, len = data.length; i < len; i++) {
var d = data[i],
match = true;
for (var k in cond) { |
if (match) {
results.push(d);
}
}
return results;
}
// Public: Creates an element for viewing annotations.
var Viewer = exports.Viewer = Widget.extend({
// Public: Creates an instance of the Viewer object.
//
// options - An Object containing options.
//
// Examples
//
// # Creates a new viewer, adds a custom field and displays an annotation.
// viewer = new Viewer()
// viewer.addField({
// load: someLoadCallback
// })
// viewer.load(annotation)
//
// Returns a new Viewer instance.
constructor: function (options) {
Widget.call(this, options);
this.itemTemplate = Viewer.itemTemplate;
this.fields = [];
this.annotations = [];
this.hideTimer = null;
this.hideTimerDfd = null;
this.hideTimerActivity = null;
this.mouseDown = false;
this.render = function (annotation) {
if (annotation.text) {
return util.escapeHtml(annotation.text);
} else {
return "<i>" + _t('No comment') + "</i>";
}
};
var self = this;
if (this.options.defaultFields) {
this.addField({
load: function (field, annotation) {
$(field).html(self.render(annotation));
}
});
}
if (typeof this.options.onEdit !== 'function') {
throw new TypeError("onEdit callback must be a function");
}
if (typeof this.options.onDelete !== 'function') {
throw new TypeError("onDelete callback must be a function");
}
if (typeof this.options.permitEdit !== 'function') {
throw new TypeError("permitEdit callback must be a function");
}
if (typeof this.options.permitDelete !== 'function') {
throw new TypeError("permitDelete callback must be a function");
}
if (this.options.autoViewHighlights) {
this.document = this.options.autoViewHighlights.ownerDocument;
$(this.options.autoViewHighlights)
.on("mouseover." + NS, '.annotator-hl', function (event) {
// If there are many overlapping highlights, still only
// call _onHighlightMouseover once.
if (event.target === this) {
self._onHighlightMouseover(event);
}
})
.on("mouseleave." + NS, '.annotator-hl', function () {
self._startHideTimer();
});
$(this.document.body)
.on("mousedown." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = true;
}
})
.on("mouseup." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = false;
}
});
}
this.element
.on("click." + NS, '.annotator-edit', function (e) {
self._onEditClick(e);
})
.on("click." + NS, '.annotator-delete', function (e) {
self._onDeleteClick(e);
})
.on("mouseenter." + NS, function () {
self._clearHideTimer();
})
.on("mouseleave." + NS, function () {
self._startHideTimer();
});
},
destroy: function () {
if (this.options.autoViewHighlights) {
$(this.options.autoViewHighlights).off("." + NS);
$(this.document.body).off("." + NS);
}
this.element.off("." + NS);
Widget.prototype.destroy.call(this);
},
// Public: Show the viewer.
//
// position - An Object specifying the position in which to show the editor
// (optional).
//
// Examples
//
// viewer.show()
// viewer.hide()
// viewer.show({top: '100px', left: '80px'})
//
// Returns nothing.
show: function (position) {
if (typeof position !== 'undefined' && position !== null) {
this.element.css({
top: position.top,
left: position.left
});
}
var controls = this.element
.find('.annotator-controls')
.addClass(this.classes.showControls);
var self = this;
setTimeout(function () {
controls.removeClass(self.classes.showControls);
}, 500);
Widget.prototype.show.call(this);
},
// Public: Load annotations into the viewer and show it.
//
// annotation - An Array of annotations.
//
// Examples
//
// viewer.load([annotation1, annotation2, annotation3])
//
// Returns nothing.
load: function (annotations, position) {
this.annotations = annotations || [];
var list = this.element.find('ul:first').empty();
for (var i = 0, len = this.annotations.length; i < len; i++) {
var annotation = this.annotations[i];
this._annotationItem(annotation)
.appendTo(list)
.data('annotation', annotation);
}
this.show(position);
},
// Public: Set the annotation renderer.
//
// renderer - A function that accepts an annotation and returns HTML.
//
// Returns nothing.
setRenderer: function (renderer) {
this.render = renderer;
},
// Private: create the list item for a single annotation
_annotationItem: function (annotation) {
var item = $(this.itemTemplate).clone();
var controls = item.find('.annotator-controls'),
link = controls.find('.annotator-link'),
edit = controls.find('.annotator-edit'),
del = controls.find('.annotator-delete');
var links = parseLinks(
annotation.links || [],
'alternate',
{'type': 'text/html'}
);
var hasValidLink = (links.length > 0 &&
typeof links[0].href !== 'undefined' &&
links[0].href !== null);
if (hasValidLink) {
link.attr('href', links[0].href);
} else {
link.remove();
}
var controller = {};
if (this.options.permitEdit(annotation)) {
controller.showEdit = function () {
edit.removeAttr('disabled');
};
controller.hideEdit = function () {
edit.attr('disabled', 'disabled');
};
} else {
edit.remove();
}
if (this.options.permitDelete(annotation)) {
controller.showDelete = function () {
del.removeAttr('disabled');
};
controller.hideDelete = function () {
del.attr('disabled', 'disabled');
};
} else {
del.remove();
}
for (var i = 0, len = this.fields.length; i < len; i++) {
var field = this.fields[i];
var element = $(field.element).clone().appendTo(item)[0];
field.load(element, annotation, controller);
}
return item;
},
// Public: Adds an addional field to an annotation view. A callback can be
// provided to update the view on load.
//
// options - An options Object. Options are as follows:
// load - Callback Function called when the view is loaded with an
// annotation. Recieves a newly created clone of an item
// and the annotation to be displayed (it will be called
// once for each annotation being loaded).
//
// Examples
//
// # Display a user name.
// viewer.addField({
// # This is called when the viewer is loaded.
// load: (field, annotation) ->
// field = $(field)
//
// if annotation.user
// field.text(annotation.user) # Display the user
// else
// field.remove() # Do not display the field.
// })
//
// Returns itself.
addField: function (options) {
var field = $.extend({
load: function () {}
}, options);
field.element = $('<div />')[0];
this.fields.push(field);
return this;
},
// Event callback: called when the edit button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onEditClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onEdit(item);
},
// Event callback: called when the delete button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onDeleteClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onDelete(item);
},
// Event callback: called when a user triggers `mouseover` on a highlight
// element.
//
// event - An Event object.
//
// Returns nothing.
_onHighlightMouseover: function (event) {
// If the mouse button is currently depressed, we're probably trying to
// make a selection, so we shouldn't show the viewer.
if (this.mouseDown) {
return;
}
var self = this;
this._startHideTimer(true)
.done(function () {
var annotations = $(event.target)
.parents('.annotator-hl')
.addBack()
.map(function (_, elem) {
return $(elem).data("annotation");
})
.toArray();
// Now show the viewer with the wanted annotations
self.load(annotations, util.mousePosition(event));
});
},
// Starts the hide timer. This returns a promise that is resolved when the
// viewer has been hidden. If the viewer is already hidden, the promise will
// be resolved instantly.
//
// activity - A boolean indicating whether the need to hide is due to a user
// actively indicating a desire to view another annotation (as
// opposed to merely mousing off the current one). Default: false
//
// Returns a Promise.
_startHideTimer: function (activity) {
if (typeof activity === 'undefined' || activity === null) {
activity = false;
}
// If timer has already been set, use that one.
if (this.hideTimer) {
if (activity === false || this.hideTimerActivity === activity) {
return this.hideTimerDfd;
} else {
// The pending timeout is an inactivity timeout, so likely to be
// too slow. Clear the pending timeout and start a new (shorter)
// one!
this._clearHideTimer();
}
}
var timeout;
if (activity) {
timeout = this.options.activityDelay;
} else {
timeout = this.options.inactivityDelay;
}
this.hideTimerDfd = $.Deferred();
if (!this.isShown()) {
this.hideTimer = null;
this.hideTimerDfd.resolve();
this.hideTimerActivity = null;
} else {
var self = this;
this.hideTimer = setTimeout(function () {
self.hide();
self.hideTimerDfd.resolve();
self.hideTimer = null;
}, timeout);
this.hideTimerActivity = Boolean(activity);
}
return this.hideTimerDfd.promise();
},
// Clears the hide timer. Also rejects any promise returned by a previous
// call to _startHideTimer.
//
// Returns nothing.
_clearHideTimer: function () {
clearTimeout(this.hideTimer);
this.hideTimer = null;
this.hideTimerDfd.reject();
this.hideTimerActivity = null;
}
});
// Classes for toggling annotator state.
Viewer.classes = {
showControls: 'annotator-visible'
};
// HTML templates for this.widget and this.item properties.
Viewer.template = [
'<div class="annotator-outer annotator-viewer annotator-hide">',
' <ul class="annotator-widget annotator-listing"></ul>',
'</div>'
].join('\n');
Viewer.itemTemplate = [
'<li class="annotator-annotation annotator-item">',
' <span class="annotator-controls">',
' <a href="#"',
' title="' + _t('View as webpage') + '"',
' class="annotator-link">' + _t('View as webpage') + '</a>',
' <button type="button"',
' title="' + _t('Edit') + '"',
' class="annotator-edit">' + _t('Edit') + '</button>',
' <button type="button"',
' title="' + _t('Delete') + '"',
' class="annotator-delete">' + _t('Delete') + '</button>',
' </span>',
'</li>'
].join('\n');
// Configuration options
Viewer.options = {
// Add the default field(s) to the viewer.
defaultFields: true,
// Time, in milliseconds, before the viewer is hidden when a user mouses off
// the viewer.
inactivityDelay: 500,
// Time, in milliseconds, before the viewer is updated when a user mouses
// over another annotation.
activityDelay: 100,
// Hook, passed an annotation, which determines if the viewer's "edit"
// button is shown. If it is not a function, the button will not be shown.
permitEdit: function () { return false; },
// Hook, passed an annotation, which determines if the viewer's "delete"
// button is shown. If it is not a function, the button will not be shown.
permitDelete: function () { return false; },
// If set to a DOM Element, will set up the viewer to automatically display
// when the user hovers over Annotator highlights within that element.
autoViewHighlights: null,
// Callback, called when the user clicks the edit button for an annotation.
onEdit: function () {},
// Callback, called when the user clicks the delete button for an
// annotation.
onDelete: function () {}
};
// standalone is a module that uses the Viewer to display an viewer widget in
// response to some viewer action (such as mousing over an annotator highlight
// element).
exports.standalone = function standalone(options) {
var widget;
if (typeof options === 'undefined' || options === null) {
options = {};
}
return {
start: function (app) {
var ident = app.registry.getUtility('identityPolicy');
var authz = app.registry.getUtility('authorizationPolicy');
// Set default handlers for what happens when the user clicks the
// edit and delete buttons:
if (typeof options.onEdit === 'undefined') {
options.onEdit = function (annotation) {
app.annotations.update(annotation);
};
}
if (typeof options.onDelete === 'undefined') {
options.onDelete = function (annotation) {
app.annotations['delete'](annotation);
};
}
// Set default handlers that determine whether the edit and delete
// buttons are shown in the viewer:
if (typeof options.permitEdit === 'undefined') {
options.permitEdit = function (annotation) {
return authz.permits('update', annotation, ident.who());
};
}
if (typeof options.permitDelete === 'undefined') {
options.permitDelete = function (annotation) {
return authz.permits('delete', annotation, ident.who());
};
}
widget = new exports.Viewer(options);
},
destroy: function () { widget.destroy(); }
};
}; | if (cond.hasOwnProperty(k) && d[k] !== cond[k]) {
match = false;
break;
}
} | random_line_split |
viewer.js | "use strict";
var Widget = require('./widget').Widget,
util = require('../util');
var $ = util.$,
_t = util.gettext;
var NS = 'annotator-viewer';
// Private: simple parser for hypermedia link structure
//
// Examples:
//
// links = [
// {
// rel: 'alternate',
// href: 'http://example.com/pages/14.json',
// type: 'application/json'
// },
// {
// rel: 'prev':
// href: 'http://example.com/pages/13'
// }
// ]
//
// parseLinks(links, 'alternate')
// # => [{rel: 'alternate', href: 'http://...', ... }]
// parseLinks(links, 'alternate', {type: 'text/html'})
// # => []
//
function | (data, rel, cond) {
cond = $.extend({}, cond, {rel: rel});
var results = [];
for (var i = 0, len = data.length; i < len; i++) {
var d = data[i],
match = true;
for (var k in cond) {
if (cond.hasOwnProperty(k) && d[k] !== cond[k]) {
match = false;
break;
}
}
if (match) {
results.push(d);
}
}
return results;
}
// Public: Creates an element for viewing annotations.
var Viewer = exports.Viewer = Widget.extend({
// Public: Creates an instance of the Viewer object.
//
// options - An Object containing options.
//
// Examples
//
// # Creates a new viewer, adds a custom field and displays an annotation.
// viewer = new Viewer()
// viewer.addField({
// load: someLoadCallback
// })
// viewer.load(annotation)
//
// Returns a new Viewer instance.
constructor: function (options) {
Widget.call(this, options);
this.itemTemplate = Viewer.itemTemplate;
this.fields = [];
this.annotations = [];
this.hideTimer = null;
this.hideTimerDfd = null;
this.hideTimerActivity = null;
this.mouseDown = false;
this.render = function (annotation) {
if (annotation.text) {
return util.escapeHtml(annotation.text);
} else {
return "<i>" + _t('No comment') + "</i>";
}
};
var self = this;
if (this.options.defaultFields) {
this.addField({
load: function (field, annotation) {
$(field).html(self.render(annotation));
}
});
}
if (typeof this.options.onEdit !== 'function') {
throw new TypeError("onEdit callback must be a function");
}
if (typeof this.options.onDelete !== 'function') {
throw new TypeError("onDelete callback must be a function");
}
if (typeof this.options.permitEdit !== 'function') {
throw new TypeError("permitEdit callback must be a function");
}
if (typeof this.options.permitDelete !== 'function') {
throw new TypeError("permitDelete callback must be a function");
}
if (this.options.autoViewHighlights) {
this.document = this.options.autoViewHighlights.ownerDocument;
$(this.options.autoViewHighlights)
.on("mouseover." + NS, '.annotator-hl', function (event) {
// If there are many overlapping highlights, still only
// call _onHighlightMouseover once.
if (event.target === this) {
self._onHighlightMouseover(event);
}
})
.on("mouseleave." + NS, '.annotator-hl', function () {
self._startHideTimer();
});
$(this.document.body)
.on("mousedown." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = true;
}
})
.on("mouseup." + NS, function (e) {
if (e.which === 1) {
self.mouseDown = false;
}
});
}
this.element
.on("click." + NS, '.annotator-edit', function (e) {
self._onEditClick(e);
})
.on("click." + NS, '.annotator-delete', function (e) {
self._onDeleteClick(e);
})
.on("mouseenter." + NS, function () {
self._clearHideTimer();
})
.on("mouseleave." + NS, function () {
self._startHideTimer();
});
},
destroy: function () {
if (this.options.autoViewHighlights) {
$(this.options.autoViewHighlights).off("." + NS);
$(this.document.body).off("." + NS);
}
this.element.off("." + NS);
Widget.prototype.destroy.call(this);
},
// Public: Show the viewer.
//
// position - An Object specifying the position in which to show the editor
// (optional).
//
// Examples
//
// viewer.show()
// viewer.hide()
// viewer.show({top: '100px', left: '80px'})
//
// Returns nothing.
show: function (position) {
if (typeof position !== 'undefined' && position !== null) {
this.element.css({
top: position.top,
left: position.left
});
}
var controls = this.element
.find('.annotator-controls')
.addClass(this.classes.showControls);
var self = this;
setTimeout(function () {
controls.removeClass(self.classes.showControls);
}, 500);
Widget.prototype.show.call(this);
},
// Public: Load annotations into the viewer and show it.
//
// annotation - An Array of annotations.
//
// Examples
//
// viewer.load([annotation1, annotation2, annotation3])
//
// Returns nothing.
load: function (annotations, position) {
this.annotations = annotations || [];
var list = this.element.find('ul:first').empty();
for (var i = 0, len = this.annotations.length; i < len; i++) {
var annotation = this.annotations[i];
this._annotationItem(annotation)
.appendTo(list)
.data('annotation', annotation);
}
this.show(position);
},
// Public: Set the annotation renderer.
//
// renderer - A function that accepts an annotation and returns HTML.
//
// Returns nothing.
setRenderer: function (renderer) {
this.render = renderer;
},
// Private: create the list item for a single annotation
_annotationItem: function (annotation) {
var item = $(this.itemTemplate).clone();
var controls = item.find('.annotator-controls'),
link = controls.find('.annotator-link'),
edit = controls.find('.annotator-edit'),
del = controls.find('.annotator-delete');
var links = parseLinks(
annotation.links || [],
'alternate',
{'type': 'text/html'}
);
var hasValidLink = (links.length > 0 &&
typeof links[0].href !== 'undefined' &&
links[0].href !== null);
if (hasValidLink) {
link.attr('href', links[0].href);
} else {
link.remove();
}
var controller = {};
if (this.options.permitEdit(annotation)) {
controller.showEdit = function () {
edit.removeAttr('disabled');
};
controller.hideEdit = function () {
edit.attr('disabled', 'disabled');
};
} else {
edit.remove();
}
if (this.options.permitDelete(annotation)) {
controller.showDelete = function () {
del.removeAttr('disabled');
};
controller.hideDelete = function () {
del.attr('disabled', 'disabled');
};
} else {
del.remove();
}
for (var i = 0, len = this.fields.length; i < len; i++) {
var field = this.fields[i];
var element = $(field.element).clone().appendTo(item)[0];
field.load(element, annotation, controller);
}
return item;
},
// Public: Adds an addional field to an annotation view. A callback can be
// provided to update the view on load.
//
// options - An options Object. Options are as follows:
// load - Callback Function called when the view is loaded with an
// annotation. Recieves a newly created clone of an item
// and the annotation to be displayed (it will be called
// once for each annotation being loaded).
//
// Examples
//
// # Display a user name.
// viewer.addField({
// # This is called when the viewer is loaded.
// load: (field, annotation) ->
// field = $(field)
//
// if annotation.user
// field.text(annotation.user) # Display the user
// else
// field.remove() # Do not display the field.
// })
//
// Returns itself.
addField: function (options) {
var field = $.extend({
load: function () {}
}, options);
field.element = $('<div />')[0];
this.fields.push(field);
return this;
},
// Event callback: called when the edit button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onEditClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onEdit(item);
},
// Event callback: called when the delete button is clicked.
//
// event - An Event object.
//
// Returns nothing.
_onDeleteClick: function (event) {
var item = $(event.target)
.parents('.annotator-annotation')
.data('annotation');
this.hide();
this.options.onDelete(item);
},
// Event callback: called when a user triggers `mouseover` on a highlight
// element.
//
// event - An Event object.
//
// Returns nothing.
_onHighlightMouseover: function (event) {
// If the mouse button is currently depressed, we're probably trying to
// make a selection, so we shouldn't show the viewer.
if (this.mouseDown) {
return;
}
var self = this;
this._startHideTimer(true)
.done(function () {
var annotations = $(event.target)
.parents('.annotator-hl')
.addBack()
.map(function (_, elem) {
return $(elem).data("annotation");
})
.toArray();
// Now show the viewer with the wanted annotations
self.load(annotations, util.mousePosition(event));
});
},
// Starts the hide timer. This returns a promise that is resolved when the
// viewer has been hidden. If the viewer is already hidden, the promise will
// be resolved instantly.
//
// activity - A boolean indicating whether the need to hide is due to a user
// actively indicating a desire to view another annotation (as
// opposed to merely mousing off the current one). Default: false
//
// Returns a Promise.
_startHideTimer: function (activity) {
if (typeof activity === 'undefined' || activity === null) {
activity = false;
}
// If timer has already been set, use that one.
if (this.hideTimer) {
if (activity === false || this.hideTimerActivity === activity) {
return this.hideTimerDfd;
} else {
// The pending timeout is an inactivity timeout, so likely to be
// too slow. Clear the pending timeout and start a new (shorter)
// one!
this._clearHideTimer();
}
}
var timeout;
if (activity) {
timeout = this.options.activityDelay;
} else {
timeout = this.options.inactivityDelay;
}
this.hideTimerDfd = $.Deferred();
if (!this.isShown()) {
this.hideTimer = null;
this.hideTimerDfd.resolve();
this.hideTimerActivity = null;
} else {
var self = this;
this.hideTimer = setTimeout(function () {
self.hide();
self.hideTimerDfd.resolve();
self.hideTimer = null;
}, timeout);
this.hideTimerActivity = Boolean(activity);
}
return this.hideTimerDfd.promise();
},
// Clears the hide timer. Also rejects any promise returned by a previous
// call to _startHideTimer.
//
// Returns nothing.
_clearHideTimer: function () {
clearTimeout(this.hideTimer);
this.hideTimer = null;
this.hideTimerDfd.reject();
this.hideTimerActivity = null;
}
});
// Classes for toggling annotator state.
Viewer.classes = {
showControls: 'annotator-visible'
};
// HTML templates for this.widget and this.item properties.
Viewer.template = [
'<div class="annotator-outer annotator-viewer annotator-hide">',
' <ul class="annotator-widget annotator-listing"></ul>',
'</div>'
].join('\n');
Viewer.itemTemplate = [
'<li class="annotator-annotation annotator-item">',
' <span class="annotator-controls">',
' <a href="#"',
' title="' + _t('View as webpage') + '"',
' class="annotator-link">' + _t('View as webpage') + '</a>',
' <button type="button"',
' title="' + _t('Edit') + '"',
' class="annotator-edit">' + _t('Edit') + '</button>',
' <button type="button"',
' title="' + _t('Delete') + '"',
' class="annotator-delete">' + _t('Delete') + '</button>',
' </span>',
'</li>'
].join('\n');
// Configuration options
Viewer.options = {
// Add the default field(s) to the viewer.
defaultFields: true,
// Time, in milliseconds, before the viewer is hidden when a user mouses off
// the viewer.
inactivityDelay: 500,
// Time, in milliseconds, before the viewer is updated when a user mouses
// over another annotation.
activityDelay: 100,
// Hook, passed an annotation, which determines if the viewer's "edit"
// button is shown. If it is not a function, the button will not be shown.
permitEdit: function () { return false; },
// Hook, passed an annotation, which determines if the viewer's "delete"
// button is shown. If it is not a function, the button will not be shown.
permitDelete: function () { return false; },
// If set to a DOM Element, will set up the viewer to automatically display
// when the user hovers over Annotator highlights within that element.
autoViewHighlights: null,
// Callback, called when the user clicks the edit button for an annotation.
onEdit: function () {},
// Callback, called when the user clicks the delete button for an
// annotation.
onDelete: function () {}
};
// standalone is a module that uses the Viewer to display an viewer widget in
// response to some viewer action (such as mousing over an annotator highlight
// element).
exports.standalone = function standalone(options) {
var widget;
if (typeof options === 'undefined' || options === null) {
options = {};
}
return {
start: function (app) {
var ident = app.registry.getUtility('identityPolicy');
var authz = app.registry.getUtility('authorizationPolicy');
// Set default handlers for what happens when the user clicks the
// edit and delete buttons:
if (typeof options.onEdit === 'undefined') {
options.onEdit = function (annotation) {
app.annotations.update(annotation);
};
}
if (typeof options.onDelete === 'undefined') {
options.onDelete = function (annotation) {
app.annotations['delete'](annotation);
};
}
// Set default handlers that determine whether the edit and delete
// buttons are shown in the viewer:
if (typeof options.permitEdit === 'undefined') {
options.permitEdit = function (annotation) {
return authz.permits('update', annotation, ident.who());
};
}
if (typeof options.permitDelete === 'undefined') {
options.permitDelete = function (annotation) {
return authz.permits('delete', annotation, ident.who());
};
}
widget = new exports.Viewer(options);
},
destroy: function () { widget.destroy(); }
};
};
| parseLinks | identifier_name |
motorhead.go | package main
import (
"bytes"
"fmt"
"github.com/jasonlvhit/gocron"
"github.com/nlopes/slack"
"github.com/zmb3/spotify"
"log"
"math/rand"
"os"
"strings"
"time"
)
type BotCentral struct {
Channel *slack.Channel
Event *slack.MessageEvent
UserId string
}
type ReplyChannel struct {
Channel *slack.Channel
Attachments []slack.Attachment
DisplayTitle string
}
type StandupChannel struct {
Channel *slack.Channel
StandupTime time.Time
}
var (
commandChannel chan *BotCentral
replyChannel chan ReplyChannel
standupChannel chan StandupChannel
api *slack.Client
standupTime time.Time
genresSeeds []string
setTimes []string
)
func startBot() {
token := os.Getenv("SLACK_TOKEN")
api = slack.New(token)
rtm := api.NewRTM()
commandChannel = make(chan *BotCentral)
replyChannel = make(chan ReplyChannel)
standupChannel = make(chan StandupChannel)
go rtm.ManageConnection()
go handleCommands(replyChannel, standupChannel)
go handleReply()
go handleStandupTimer(replyChannel)
| case msg := <-rtm.IncomingEvents:
switch ev := msg.Data.(type) {
case *slack.ConnectedEvent:
fmt.Println("Connection counter:", ev.ConnectionCount)
case *slack.MessageEvent:
channelInfo, err := api.GetChannelInfo(ev.Channel)
if err != nil {
log.Println(err)
}
botCentral := &BotCentral{
Channel: channelInfo,
Event: ev,
UserId: ev.User,
}
if ev.Type == "message" && strings.HasPrefix(ev.Text, "<@"+rtm.GetInfo().User.ID+">") {
commandChannel <- botCentral
}
case *slack.RTMError:
fmt.Printf("Error: %s\n", ev.Error())
case *slack.InvalidAuthEvent:
fmt.Printf("Invalid credentials")
break Loop
default:
//Take no action
}
}
}
}
func handleCommands(c chan ReplyChannel, sc chan StandupChannel) {
commands := map[string]string{
"help": "see the available commands",
"play": "play some tunes LENNY !",
"stop": "pause|stop da music",
"next": "next tune LENNY, this ain't any good",
"previous": "dat tune was ace, play it again LENNY",
"add": "queue dis beats",
"list": "list me current tunes",
"set-time": "When's my set time ?",
"set-times": "Give me the set times",
"genres": "List me some flavours",
"current": "What's currently on da radio?",
"search": "Where are all the tunes ?",
}
var replyChannel ReplyChannel
var standupChannel StandupChannel
for {
botChannel := <-commandChannel
replyChannel.Channel = botChannel.Channel
standupChannel.Channel = botChannel.Channel
commandArray := strings.Fields(botChannel.Event.Text)
log.Printf("%+v\n", commandArray)
switch commandArray[1] {
case "help":
fields := make([]slack.AttachmentField, 0)
for k, v := range commands {
fields = append(fields, slack.AttachmentField{
Title: "@lenny " + k,
Value: v,
})
}
attachment := slack.Attachment{
Pretext: "Rockn' tune commands",
Color: "#85929E",
Fields: fields,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "play":
client.Play()
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "stop":
client.Pause()
attachment := slack.Attachment{
Pretext: "Pausing da tunes",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "next":
client.Next()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "previous":
client.Previous()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "search":
//fields := make([]slack.AttachmentField, 0)
attachments := make([]slack.Attachment, 0)
var buffer bytes.Buffer
for i := 2; i < len(commandArray); i++ {
buffer.WriteString(commandArray[i])
buffer.WriteString(" ")
}
r, _ := client.Search(buffer.String(), spotify.SearchTypeTrack)
if r != nil {
for _, val := range r.Tracks.Tracks {
// fmt.Printf("%+v", val)
// break
// fields = append(fields, slack.AttachmentField{
// Title: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
// Value: val.Name,
// })
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
Title: val.Name,
ThumbURL: val.Album.Images[1].URL,
Text: fmt.Sprintf("@lenny add %s", getTrackId(val.Endpoint)),
// Actions: []slack.AttachmentAction{
// slack.AttachmentAction{
// Name: "AddToSpotify",
// Text: "Add",
// Value: val.Endpoint,
// Type: "button",
// },
// },
// CallbackID: "add_track",
}
attachments = append(attachments, attachment)
}
}
if len(attachments) > 0 {
replyChannel.Attachments = attachments
c <- replyChannel
}
case "add":
trackId := commandArray[2]
client.AddTracksToPlaylist(userId, playlistId, spotify.ID(trackId))
attachment := slack.Attachment{
Pretext: "Added da track",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "list":
attachments := make([]slack.Attachment, 0)
pl, _ := client.GetPlaylist(userId, playlistId)
for _, val := range pl.Tracks.Tracks {
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Track.Artists[0].Name, val.Track.Album.Name),
Title: val.Track.Name,
Text: fmt.Sprintf("@lenny play %s", val.Track.Endpoint),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-time":
requestedTime := commandArray[2]
reqTime, err := time.Parse(time.Kitchen, requestedTime)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, fmt.Sprintf("%s, must be in format 3:00PM/AM", err))
c <- replyChannel
return
}
t := time.Now()
n := time.Date(t.Year(), t.Month(), t.Day(), reqTime.Hour(), reqTime.Minute(), 0, 0, t.Location())
log.Println("RQUESTED TIME: ", n)
standupChannel.StandupTime = n
sc <- standupChannel
case "set-times":
attachments := make([]slack.Attachment, 0)
for i := 0; i < len(setTimes); i++ {
attachment := slack.Attachment{
Text: fmt.Sprintf("set time: %s", setTimes[i]),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-genres":
genresSeeds = make([]string, 0)
if commandArray[2] == "random" {
attachments := make([]slack.Attachment, 0)
genres, _ := client.GetAvailableGenreSeeds()
for i := 1; i < 5; i++ {
rand.Seed(time.Now().UTC().UnixNano())
genre := genres[rand.Intn(len(genres))]
attachment := slack.Attachment{
Text: fmt.Sprintf("added genre %s", genre),
}
attachments = append(attachments, attachment)
genresSeeds = append(genresSeeds, genre)
}
replyChannel.Attachments = attachments
c <- replyChannel
} else {
for i := 2; i < len(commandArray); i++ {
genresSeeds = append(genresSeeds, commandArray[i])
}
}
case "current":
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "genres":
if genres, err := client.GetAvailableGenreSeeds(); err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
c <- replyChannel
return
} else {
var genresBuffer bytes.Buffer
for _, val := range genres {
genresBuffer.WriteString(fmt.Sprintf("%s\n", val))
}
attachment := slack.Attachment{
Text: genresBuffer.String(),
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
case "tribute":
track, _ := client.GetTrack("2HB4bT5bvUWDbOjNpIwmNi")
uris := []spotify.URI{spotify.URI("spotify:track:2HB4bT5bvUWDbOjNpIwmNi")}
opts := &spotify.PlayOptions{URIs: uris}
client.PlayOpt(opts)
attachment := slack.Attachment{
Title: "RIP Chris Cornell",
Text: fmt.Sprintf("%s, %s", track.Album.Name, track.Name),
ImageURL: track.Album.Images[0].URL,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
default:
attachment := slack.Attachment{
Pretext: "Command error",
Text: "I'm too smashed to play any tunes",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
}
}
func getErrorAttachment(err error, txt string) []slack.Attachment {
attachment := slack.Attachment{
Pretext: "I BROKE !!",
Text: fmt.Sprintf("%s", err),
}
if txt != "" {
attachment.Text = txt
}
return []slack.Attachment{attachment}
}
func timeToPlayMusic(ch chan ReplyChannel, sc *slack.Channel) {
var replyChannel ReplyChannel
replyChannel.Channel = sc
if len(genresSeeds) == 0 {
genresSeeds = append(genresSeeds, "alternative")
}
seeds := spotify.Seeds{
Genres: genresSeeds,
//Artists: []spotify.ID{spotify.ID("5xUf6j4upBrXZPg6AI4MRK"), spotify.ID("2ziB7fzrXBoh1HUPS6sVFn")},
}
trackAttr := spotify.NewTrackAttributes()
// trackAttr.MaxEnergy(1.0)
// trackAttr.TargetEnergy(1.0)
// trackAttr.TargetDanceability(1.0)
opts := &spotify.Options{}
recs, err := client.GetRecommendations(seeds, trackAttr, opts)
// recs, err := client.Search("soundgarden", spotify.SearchTypeArtist)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
log.Printf("%+v", recs)
//playOpts := &spotify.PlayOptions{}
uris := []spotify.URI{}
ids := []spotify.ID{}
for _, val := range recs.Tracks {
trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
uris = append(uris, spotify.URI(trackURL))
ids = append(ids, val.ID)
}
// for _, val := range recs.Tracks.Tracks {
// trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
// uris = append(uris, spotify.URI(trackURL))
// ids = append(ids, val.ID)
// }
err = client.ReplacePlaylistTracks(userId, playlistId, ids...)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
//playOpts.URIs = uris
// client.PlayOpt(&spotify.PlayOptions{
// DeviceID: spotify.ID("11f1a54fa480f1559f1dd0cfdf42e9451dc17cb7"),
// URIs: []spotify.URI{spotify.URI("https://api.spotify.com/v1/tracks/3XVBdLihbNbxUwZosxcGuJ")},
// })
client.Next()
err = client.Play()
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
ch <- replyChannel
}
func handleStandupTimer(c chan ReplyChannel) {
var replyChannel ReplyChannel
for {
sc := <-standupChannel
replyChannel.Channel = sc.Channel
attachment := slack.Attachment{
Pretext: "Set time engaged",
Text: fmt.Sprintf("I will blast some tunes at %s", sc.StandupTime.Format("3:04PM")),
}
setTime := sc.StandupTime.Format("15:04")
newcron := gocron.NewScheduler()
newcron.Every(1).Monday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Tuesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Wednesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Thursday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Friday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
setTimes = append(setTimes, setTime)
go func() {
<-newcron.Start()
}()
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
}
func getTrackId(trackEndpoint string) string {
strs := strings.Split(trackEndpoint, "/")
return strs[len(strs)-1]
}
func handleReply() {
for {
ac := <-replyChannel
params := slack.PostMessageParameters{}
params.AsUser = true
params.Attachments = ac.Attachments
params.UnfurlMedia = true
fmt.Println("Channel: ", ac.Channel.Name)
_, _, errPostMessage := api.PostMessage(ac.Channel.Name, ac.DisplayTitle, params)
if errPostMessage != nil {
log.Fatal(errPostMessage)
}
}
} | Loop:
for {
select { | random_line_split |
motorhead.go | package main
import (
"bytes"
"fmt"
"github.com/jasonlvhit/gocron"
"github.com/nlopes/slack"
"github.com/zmb3/spotify"
"log"
"math/rand"
"os"
"strings"
"time"
)
type BotCentral struct {
Channel *slack.Channel
Event *slack.MessageEvent
UserId string
}
type ReplyChannel struct {
Channel *slack.Channel
Attachments []slack.Attachment
DisplayTitle string
}
type StandupChannel struct {
Channel *slack.Channel
StandupTime time.Time
}
var (
commandChannel chan *BotCentral
replyChannel chan ReplyChannel
standupChannel chan StandupChannel
api *slack.Client
standupTime time.Time
genresSeeds []string
setTimes []string
)
func startBot() {
token := os.Getenv("SLACK_TOKEN")
api = slack.New(token)
rtm := api.NewRTM()
commandChannel = make(chan *BotCentral)
replyChannel = make(chan ReplyChannel)
standupChannel = make(chan StandupChannel)
go rtm.ManageConnection()
go handleCommands(replyChannel, standupChannel)
go handleReply()
go handleStandupTimer(replyChannel)
Loop:
for {
select {
case msg := <-rtm.IncomingEvents:
switch ev := msg.Data.(type) {
case *slack.ConnectedEvent:
fmt.Println("Connection counter:", ev.ConnectionCount)
case *slack.MessageEvent:
channelInfo, err := api.GetChannelInfo(ev.Channel)
if err != nil {
log.Println(err)
}
botCentral := &BotCentral{
Channel: channelInfo,
Event: ev,
UserId: ev.User,
}
if ev.Type == "message" && strings.HasPrefix(ev.Text, "<@"+rtm.GetInfo().User.ID+">") {
commandChannel <- botCentral
}
case *slack.RTMError:
fmt.Printf("Error: %s\n", ev.Error())
case *slack.InvalidAuthEvent:
fmt.Printf("Invalid credentials")
break Loop
default:
//Take no action
}
}
}
}
func | (c chan ReplyChannel, sc chan StandupChannel) {
commands := map[string]string{
"help": "see the available commands",
"play": "play some tunes LENNY !",
"stop": "pause|stop da music",
"next": "next tune LENNY, this ain't any good",
"previous": "dat tune was ace, play it again LENNY",
"add": "queue dis beats",
"list": "list me current tunes",
"set-time": "When's my set time ?",
"set-times": "Give me the set times",
"genres": "List me some flavours",
"current": "What's currently on da radio?",
"search": "Where are all the tunes ?",
}
var replyChannel ReplyChannel
var standupChannel StandupChannel
for {
botChannel := <-commandChannel
replyChannel.Channel = botChannel.Channel
standupChannel.Channel = botChannel.Channel
commandArray := strings.Fields(botChannel.Event.Text)
log.Printf("%+v\n", commandArray)
switch commandArray[1] {
case "help":
fields := make([]slack.AttachmentField, 0)
for k, v := range commands {
fields = append(fields, slack.AttachmentField{
Title: "@lenny " + k,
Value: v,
})
}
attachment := slack.Attachment{
Pretext: "Rockn' tune commands",
Color: "#85929E",
Fields: fields,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "play":
client.Play()
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "stop":
client.Pause()
attachment := slack.Attachment{
Pretext: "Pausing da tunes",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "next":
client.Next()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "previous":
client.Previous()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "search":
//fields := make([]slack.AttachmentField, 0)
attachments := make([]slack.Attachment, 0)
var buffer bytes.Buffer
for i := 2; i < len(commandArray); i++ {
buffer.WriteString(commandArray[i])
buffer.WriteString(" ")
}
r, _ := client.Search(buffer.String(), spotify.SearchTypeTrack)
if r != nil {
for _, val := range r.Tracks.Tracks {
// fmt.Printf("%+v", val)
// break
// fields = append(fields, slack.AttachmentField{
// Title: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
// Value: val.Name,
// })
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
Title: val.Name,
ThumbURL: val.Album.Images[1].URL,
Text: fmt.Sprintf("@lenny add %s", getTrackId(val.Endpoint)),
// Actions: []slack.AttachmentAction{
// slack.AttachmentAction{
// Name: "AddToSpotify",
// Text: "Add",
// Value: val.Endpoint,
// Type: "button",
// },
// },
// CallbackID: "add_track",
}
attachments = append(attachments, attachment)
}
}
if len(attachments) > 0 {
replyChannel.Attachments = attachments
c <- replyChannel
}
case "add":
trackId := commandArray[2]
client.AddTracksToPlaylist(userId, playlistId, spotify.ID(trackId))
attachment := slack.Attachment{
Pretext: "Added da track",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "list":
attachments := make([]slack.Attachment, 0)
pl, _ := client.GetPlaylist(userId, playlistId)
for _, val := range pl.Tracks.Tracks {
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Track.Artists[0].Name, val.Track.Album.Name),
Title: val.Track.Name,
Text: fmt.Sprintf("@lenny play %s", val.Track.Endpoint),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-time":
requestedTime := commandArray[2]
reqTime, err := time.Parse(time.Kitchen, requestedTime)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, fmt.Sprintf("%s, must be in format 3:00PM/AM", err))
c <- replyChannel
return
}
t := time.Now()
n := time.Date(t.Year(), t.Month(), t.Day(), reqTime.Hour(), reqTime.Minute(), 0, 0, t.Location())
log.Println("RQUESTED TIME: ", n)
standupChannel.StandupTime = n
sc <- standupChannel
case "set-times":
attachments := make([]slack.Attachment, 0)
for i := 0; i < len(setTimes); i++ {
attachment := slack.Attachment{
Text: fmt.Sprintf("set time: %s", setTimes[i]),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-genres":
genresSeeds = make([]string, 0)
if commandArray[2] == "random" {
attachments := make([]slack.Attachment, 0)
genres, _ := client.GetAvailableGenreSeeds()
for i := 1; i < 5; i++ {
rand.Seed(time.Now().UTC().UnixNano())
genre := genres[rand.Intn(len(genres))]
attachment := slack.Attachment{
Text: fmt.Sprintf("added genre %s", genre),
}
attachments = append(attachments, attachment)
genresSeeds = append(genresSeeds, genre)
}
replyChannel.Attachments = attachments
c <- replyChannel
} else {
for i := 2; i < len(commandArray); i++ {
genresSeeds = append(genresSeeds, commandArray[i])
}
}
case "current":
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "genres":
if genres, err := client.GetAvailableGenreSeeds(); err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
c <- replyChannel
return
} else {
var genresBuffer bytes.Buffer
for _, val := range genres {
genresBuffer.WriteString(fmt.Sprintf("%s\n", val))
}
attachment := slack.Attachment{
Text: genresBuffer.String(),
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
case "tribute":
track, _ := client.GetTrack("2HB4bT5bvUWDbOjNpIwmNi")
uris := []spotify.URI{spotify.URI("spotify:track:2HB4bT5bvUWDbOjNpIwmNi")}
opts := &spotify.PlayOptions{URIs: uris}
client.PlayOpt(opts)
attachment := slack.Attachment{
Title: "RIP Chris Cornell",
Text: fmt.Sprintf("%s, %s", track.Album.Name, track.Name),
ImageURL: track.Album.Images[0].URL,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
default:
attachment := slack.Attachment{
Pretext: "Command error",
Text: "I'm too smashed to play any tunes",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
}
}
func getErrorAttachment(err error, txt string) []slack.Attachment {
attachment := slack.Attachment{
Pretext: "I BROKE !!",
Text: fmt.Sprintf("%s", err),
}
if txt != "" {
attachment.Text = txt
}
return []slack.Attachment{attachment}
}
func timeToPlayMusic(ch chan ReplyChannel, sc *slack.Channel) {
var replyChannel ReplyChannel
replyChannel.Channel = sc
if len(genresSeeds) == 0 {
genresSeeds = append(genresSeeds, "alternative")
}
seeds := spotify.Seeds{
Genres: genresSeeds,
//Artists: []spotify.ID{spotify.ID("5xUf6j4upBrXZPg6AI4MRK"), spotify.ID("2ziB7fzrXBoh1HUPS6sVFn")},
}
trackAttr := spotify.NewTrackAttributes()
// trackAttr.MaxEnergy(1.0)
// trackAttr.TargetEnergy(1.0)
// trackAttr.TargetDanceability(1.0)
opts := &spotify.Options{}
recs, err := client.GetRecommendations(seeds, trackAttr, opts)
// recs, err := client.Search("soundgarden", spotify.SearchTypeArtist)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
log.Printf("%+v", recs)
//playOpts := &spotify.PlayOptions{}
uris := []spotify.URI{}
ids := []spotify.ID{}
for _, val := range recs.Tracks {
trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
uris = append(uris, spotify.URI(trackURL))
ids = append(ids, val.ID)
}
// for _, val := range recs.Tracks.Tracks {
// trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
// uris = append(uris, spotify.URI(trackURL))
// ids = append(ids, val.ID)
// }
err = client.ReplacePlaylistTracks(userId, playlistId, ids...)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
//playOpts.URIs = uris
// client.PlayOpt(&spotify.PlayOptions{
// DeviceID: spotify.ID("11f1a54fa480f1559f1dd0cfdf42e9451dc17cb7"),
// URIs: []spotify.URI{spotify.URI("https://api.spotify.com/v1/tracks/3XVBdLihbNbxUwZosxcGuJ")},
// })
client.Next()
err = client.Play()
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
ch <- replyChannel
}
func handleStandupTimer(c chan ReplyChannel) {
var replyChannel ReplyChannel
for {
sc := <-standupChannel
replyChannel.Channel = sc.Channel
attachment := slack.Attachment{
Pretext: "Set time engaged",
Text: fmt.Sprintf("I will blast some tunes at %s", sc.StandupTime.Format("3:04PM")),
}
setTime := sc.StandupTime.Format("15:04")
newcron := gocron.NewScheduler()
newcron.Every(1).Monday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Tuesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Wednesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Thursday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Friday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
setTimes = append(setTimes, setTime)
go func() {
<-newcron.Start()
}()
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
}
func getTrackId(trackEndpoint string) string {
strs := strings.Split(trackEndpoint, "/")
return strs[len(strs)-1]
}
func handleReply() {
for {
ac := <-replyChannel
params := slack.PostMessageParameters{}
params.AsUser = true
params.Attachments = ac.Attachments
params.UnfurlMedia = true
fmt.Println("Channel: ", ac.Channel.Name)
_, _, errPostMessage := api.PostMessage(ac.Channel.Name, ac.DisplayTitle, params)
if errPostMessage != nil {
log.Fatal(errPostMessage)
}
}
}
| handleCommands | identifier_name |
motorhead.go | package main
import (
"bytes"
"fmt"
"github.com/jasonlvhit/gocron"
"github.com/nlopes/slack"
"github.com/zmb3/spotify"
"log"
"math/rand"
"os"
"strings"
"time"
)
type BotCentral struct {
Channel *slack.Channel
Event *slack.MessageEvent
UserId string
}
type ReplyChannel struct {
Channel *slack.Channel
Attachments []slack.Attachment
DisplayTitle string
}
type StandupChannel struct {
Channel *slack.Channel
StandupTime time.Time
}
var (
commandChannel chan *BotCentral
replyChannel chan ReplyChannel
standupChannel chan StandupChannel
api *slack.Client
standupTime time.Time
genresSeeds []string
setTimes []string
)
func startBot() {
token := os.Getenv("SLACK_TOKEN")
api = slack.New(token)
rtm := api.NewRTM()
commandChannel = make(chan *BotCentral)
replyChannel = make(chan ReplyChannel)
standupChannel = make(chan StandupChannel)
go rtm.ManageConnection()
go handleCommands(replyChannel, standupChannel)
go handleReply()
go handleStandupTimer(replyChannel)
Loop:
for {
select {
case msg := <-rtm.IncomingEvents:
switch ev := msg.Data.(type) {
case *slack.ConnectedEvent:
fmt.Println("Connection counter:", ev.ConnectionCount)
case *slack.MessageEvent:
channelInfo, err := api.GetChannelInfo(ev.Channel)
if err != nil {
log.Println(err)
}
botCentral := &BotCentral{
Channel: channelInfo,
Event: ev,
UserId: ev.User,
}
if ev.Type == "message" && strings.HasPrefix(ev.Text, "<@"+rtm.GetInfo().User.ID+">") {
commandChannel <- botCentral
}
case *slack.RTMError:
fmt.Printf("Error: %s\n", ev.Error())
case *slack.InvalidAuthEvent:
fmt.Printf("Invalid credentials")
break Loop
default:
//Take no action
}
}
}
}
func handleCommands(c chan ReplyChannel, sc chan StandupChannel) {
commands := map[string]string{
"help": "see the available commands",
"play": "play some tunes LENNY !",
"stop": "pause|stop da music",
"next": "next tune LENNY, this ain't any good",
"previous": "dat tune was ace, play it again LENNY",
"add": "queue dis beats",
"list": "list me current tunes",
"set-time": "When's my set time ?",
"set-times": "Give me the set times",
"genres": "List me some flavours",
"current": "What's currently on da radio?",
"search": "Where are all the tunes ?",
}
var replyChannel ReplyChannel
var standupChannel StandupChannel
for {
botChannel := <-commandChannel
replyChannel.Channel = botChannel.Channel
standupChannel.Channel = botChannel.Channel
commandArray := strings.Fields(botChannel.Event.Text)
log.Printf("%+v\n", commandArray)
switch commandArray[1] {
case "help":
fields := make([]slack.AttachmentField, 0)
for k, v := range commands {
fields = append(fields, slack.AttachmentField{
Title: "@lenny " + k,
Value: v,
})
}
attachment := slack.Attachment{
Pretext: "Rockn' tune commands",
Color: "#85929E",
Fields: fields,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "play":
client.Play()
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "stop":
client.Pause()
attachment := slack.Attachment{
Pretext: "Pausing da tunes",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "next":
client.Next()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "previous":
client.Previous()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "search":
//fields := make([]slack.AttachmentField, 0)
attachments := make([]slack.Attachment, 0)
var buffer bytes.Buffer
for i := 2; i < len(commandArray); i++ {
buffer.WriteString(commandArray[i])
buffer.WriteString(" ")
}
r, _ := client.Search(buffer.String(), spotify.SearchTypeTrack)
if r != nil {
for _, val := range r.Tracks.Tracks {
// fmt.Printf("%+v", val)
// break
// fields = append(fields, slack.AttachmentField{
// Title: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
// Value: val.Name,
// })
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
Title: val.Name,
ThumbURL: val.Album.Images[1].URL,
Text: fmt.Sprintf("@lenny add %s", getTrackId(val.Endpoint)),
// Actions: []slack.AttachmentAction{
// slack.AttachmentAction{
// Name: "AddToSpotify",
// Text: "Add",
// Value: val.Endpoint,
// Type: "button",
// },
// },
// CallbackID: "add_track",
}
attachments = append(attachments, attachment)
}
}
if len(attachments) > 0 {
replyChannel.Attachments = attachments
c <- replyChannel
}
case "add":
trackId := commandArray[2]
client.AddTracksToPlaylist(userId, playlistId, spotify.ID(trackId))
attachment := slack.Attachment{
Pretext: "Added da track",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "list":
attachments := make([]slack.Attachment, 0)
pl, _ := client.GetPlaylist(userId, playlistId)
for _, val := range pl.Tracks.Tracks {
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Track.Artists[0].Name, val.Track.Album.Name),
Title: val.Track.Name,
Text: fmt.Sprintf("@lenny play %s", val.Track.Endpoint),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-time":
requestedTime := commandArray[2]
reqTime, err := time.Parse(time.Kitchen, requestedTime)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, fmt.Sprintf("%s, must be in format 3:00PM/AM", err))
c <- replyChannel
return
}
t := time.Now()
n := time.Date(t.Year(), t.Month(), t.Day(), reqTime.Hour(), reqTime.Minute(), 0, 0, t.Location())
log.Println("RQUESTED TIME: ", n)
standupChannel.StandupTime = n
sc <- standupChannel
case "set-times":
attachments := make([]slack.Attachment, 0)
for i := 0; i < len(setTimes); i++ {
attachment := slack.Attachment{
Text: fmt.Sprintf("set time: %s", setTimes[i]),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-genres":
genresSeeds = make([]string, 0)
if commandArray[2] == "random" | else {
for i := 2; i < len(commandArray); i++ {
genresSeeds = append(genresSeeds, commandArray[i])
}
}
case "current":
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "genres":
if genres, err := client.GetAvailableGenreSeeds(); err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
c <- replyChannel
return
} else {
var genresBuffer bytes.Buffer
for _, val := range genres {
genresBuffer.WriteString(fmt.Sprintf("%s\n", val))
}
attachment := slack.Attachment{
Text: genresBuffer.String(),
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
case "tribute":
track, _ := client.GetTrack("2HB4bT5bvUWDbOjNpIwmNi")
uris := []spotify.URI{spotify.URI("spotify:track:2HB4bT5bvUWDbOjNpIwmNi")}
opts := &spotify.PlayOptions{URIs: uris}
client.PlayOpt(opts)
attachment := slack.Attachment{
Title: "RIP Chris Cornell",
Text: fmt.Sprintf("%s, %s", track.Album.Name, track.Name),
ImageURL: track.Album.Images[0].URL,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
default:
attachment := slack.Attachment{
Pretext: "Command error",
Text: "I'm too smashed to play any tunes",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
}
}
func getErrorAttachment(err error, txt string) []slack.Attachment {
attachment := slack.Attachment{
Pretext: "I BROKE !!",
Text: fmt.Sprintf("%s", err),
}
if txt != "" {
attachment.Text = txt
}
return []slack.Attachment{attachment}
}
func timeToPlayMusic(ch chan ReplyChannel, sc *slack.Channel) {
var replyChannel ReplyChannel
replyChannel.Channel = sc
if len(genresSeeds) == 0 {
genresSeeds = append(genresSeeds, "alternative")
}
seeds := spotify.Seeds{
Genres: genresSeeds,
//Artists: []spotify.ID{spotify.ID("5xUf6j4upBrXZPg6AI4MRK"), spotify.ID("2ziB7fzrXBoh1HUPS6sVFn")},
}
trackAttr := spotify.NewTrackAttributes()
// trackAttr.MaxEnergy(1.0)
// trackAttr.TargetEnergy(1.0)
// trackAttr.TargetDanceability(1.0)
opts := &spotify.Options{}
recs, err := client.GetRecommendations(seeds, trackAttr, opts)
// recs, err := client.Search("soundgarden", spotify.SearchTypeArtist)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
log.Printf("%+v", recs)
//playOpts := &spotify.PlayOptions{}
uris := []spotify.URI{}
ids := []spotify.ID{}
for _, val := range recs.Tracks {
trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
uris = append(uris, spotify.URI(trackURL))
ids = append(ids, val.ID)
}
// for _, val := range recs.Tracks.Tracks {
// trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
// uris = append(uris, spotify.URI(trackURL))
// ids = append(ids, val.ID)
// }
err = client.ReplacePlaylistTracks(userId, playlistId, ids...)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
//playOpts.URIs = uris
// client.PlayOpt(&spotify.PlayOptions{
// DeviceID: spotify.ID("11f1a54fa480f1559f1dd0cfdf42e9451dc17cb7"),
// URIs: []spotify.URI{spotify.URI("https://api.spotify.com/v1/tracks/3XVBdLihbNbxUwZosxcGuJ")},
// })
client.Next()
err = client.Play()
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
ch <- replyChannel
}
func handleStandupTimer(c chan ReplyChannel) {
var replyChannel ReplyChannel
for {
sc := <-standupChannel
replyChannel.Channel = sc.Channel
attachment := slack.Attachment{
Pretext: "Set time engaged",
Text: fmt.Sprintf("I will blast some tunes at %s", sc.StandupTime.Format("3:04PM")),
}
setTime := sc.StandupTime.Format("15:04")
newcron := gocron.NewScheduler()
newcron.Every(1).Monday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Tuesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Wednesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Thursday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Friday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
setTimes = append(setTimes, setTime)
go func() {
<-newcron.Start()
}()
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
}
func getTrackId(trackEndpoint string) string {
strs := strings.Split(trackEndpoint, "/")
return strs[len(strs)-1]
}
func handleReply() {
for {
ac := <-replyChannel
params := slack.PostMessageParameters{}
params.AsUser = true
params.Attachments = ac.Attachments
params.UnfurlMedia = true
fmt.Println("Channel: ", ac.Channel.Name)
_, _, errPostMessage := api.PostMessage(ac.Channel.Name, ac.DisplayTitle, params)
if errPostMessage != nil {
log.Fatal(errPostMessage)
}
}
}
| {
attachments := make([]slack.Attachment, 0)
genres, _ := client.GetAvailableGenreSeeds()
for i := 1; i < 5; i++ {
rand.Seed(time.Now().UTC().UnixNano())
genre := genres[rand.Intn(len(genres))]
attachment := slack.Attachment{
Text: fmt.Sprintf("added genre %s", genre),
}
attachments = append(attachments, attachment)
genresSeeds = append(genresSeeds, genre)
}
replyChannel.Attachments = attachments
c <- replyChannel
} | conditional_block |
motorhead.go | package main
import (
"bytes"
"fmt"
"github.com/jasonlvhit/gocron"
"github.com/nlopes/slack"
"github.com/zmb3/spotify"
"log"
"math/rand"
"os"
"strings"
"time"
)
type BotCentral struct {
Channel *slack.Channel
Event *slack.MessageEvent
UserId string
}
type ReplyChannel struct {
Channel *slack.Channel
Attachments []slack.Attachment
DisplayTitle string
}
type StandupChannel struct {
Channel *slack.Channel
StandupTime time.Time
}
var (
commandChannel chan *BotCentral
replyChannel chan ReplyChannel
standupChannel chan StandupChannel
api *slack.Client
standupTime time.Time
genresSeeds []string
setTimes []string
)
func startBot() {
token := os.Getenv("SLACK_TOKEN")
api = slack.New(token)
rtm := api.NewRTM()
commandChannel = make(chan *BotCentral)
replyChannel = make(chan ReplyChannel)
standupChannel = make(chan StandupChannel)
go rtm.ManageConnection()
go handleCommands(replyChannel, standupChannel)
go handleReply()
go handleStandupTimer(replyChannel)
Loop:
for {
select {
case msg := <-rtm.IncomingEvents:
switch ev := msg.Data.(type) {
case *slack.ConnectedEvent:
fmt.Println("Connection counter:", ev.ConnectionCount)
case *slack.MessageEvent:
channelInfo, err := api.GetChannelInfo(ev.Channel)
if err != nil {
log.Println(err)
}
botCentral := &BotCentral{
Channel: channelInfo,
Event: ev,
UserId: ev.User,
}
if ev.Type == "message" && strings.HasPrefix(ev.Text, "<@"+rtm.GetInfo().User.ID+">") {
commandChannel <- botCentral
}
case *slack.RTMError:
fmt.Printf("Error: %s\n", ev.Error())
case *slack.InvalidAuthEvent:
fmt.Printf("Invalid credentials")
break Loop
default:
//Take no action
}
}
}
}
func handleCommands(c chan ReplyChannel, sc chan StandupChannel) {
commands := map[string]string{
"help": "see the available commands",
"play": "play some tunes LENNY !",
"stop": "pause|stop da music",
"next": "next tune LENNY, this ain't any good",
"previous": "dat tune was ace, play it again LENNY",
"add": "queue dis beats",
"list": "list me current tunes",
"set-time": "When's my set time ?",
"set-times": "Give me the set times",
"genres": "List me some flavours",
"current": "What's currently on da radio?",
"search": "Where are all the tunes ?",
}
var replyChannel ReplyChannel
var standupChannel StandupChannel
for {
botChannel := <-commandChannel
replyChannel.Channel = botChannel.Channel
standupChannel.Channel = botChannel.Channel
commandArray := strings.Fields(botChannel.Event.Text)
log.Printf("%+v\n", commandArray)
switch commandArray[1] {
case "help":
fields := make([]slack.AttachmentField, 0)
for k, v := range commands {
fields = append(fields, slack.AttachmentField{
Title: "@lenny " + k,
Value: v,
})
}
attachment := slack.Attachment{
Pretext: "Rockn' tune commands",
Color: "#85929E",
Fields: fields,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "play":
client.Play()
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "stop":
client.Pause()
attachment := slack.Attachment{
Pretext: "Pausing da tunes",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "next":
client.Next()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "previous":
client.Previous()
time.Sleep(2 * time.Second)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "search":
//fields := make([]slack.AttachmentField, 0)
attachments := make([]slack.Attachment, 0)
var buffer bytes.Buffer
for i := 2; i < len(commandArray); i++ {
buffer.WriteString(commandArray[i])
buffer.WriteString(" ")
}
r, _ := client.Search(buffer.String(), spotify.SearchTypeTrack)
if r != nil {
for _, val := range r.Tracks.Tracks {
// fmt.Printf("%+v", val)
// break
// fields = append(fields, slack.AttachmentField{
// Title: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
// Value: val.Name,
// })
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Artists[0].Name, val.Album.Name),
Title: val.Name,
ThumbURL: val.Album.Images[1].URL,
Text: fmt.Sprintf("@lenny add %s", getTrackId(val.Endpoint)),
// Actions: []slack.AttachmentAction{
// slack.AttachmentAction{
// Name: "AddToSpotify",
// Text: "Add",
// Value: val.Endpoint,
// Type: "button",
// },
// },
// CallbackID: "add_track",
}
attachments = append(attachments, attachment)
}
}
if len(attachments) > 0 {
replyChannel.Attachments = attachments
c <- replyChannel
}
case "add":
trackId := commandArray[2]
client.AddTracksToPlaylist(userId, playlistId, spotify.ID(trackId))
attachment := slack.Attachment{
Pretext: "Added da track",
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "list":
attachments := make([]slack.Attachment, 0)
pl, _ := client.GetPlaylist(userId, playlistId)
for _, val := range pl.Tracks.Tracks {
attachment := slack.Attachment{
Pretext: fmt.Sprintf("%s, %s", val.Track.Artists[0].Name, val.Track.Album.Name),
Title: val.Track.Name,
Text: fmt.Sprintf("@lenny play %s", val.Track.Endpoint),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-time":
requestedTime := commandArray[2]
reqTime, err := time.Parse(time.Kitchen, requestedTime)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, fmt.Sprintf("%s, must be in format 3:00PM/AM", err))
c <- replyChannel
return
}
t := time.Now()
n := time.Date(t.Year(), t.Month(), t.Day(), reqTime.Hour(), reqTime.Minute(), 0, 0, t.Location())
log.Println("RQUESTED TIME: ", n)
standupChannel.StandupTime = n
sc <- standupChannel
case "set-times":
attachments := make([]slack.Attachment, 0)
for i := 0; i < len(setTimes); i++ {
attachment := slack.Attachment{
Text: fmt.Sprintf("set time: %s", setTimes[i]),
}
attachments = append(attachments, attachment)
}
replyChannel.Attachments = attachments
c <- replyChannel
case "set-genres":
genresSeeds = make([]string, 0)
if commandArray[2] == "random" {
attachments := make([]slack.Attachment, 0)
genres, _ := client.GetAvailableGenreSeeds()
for i := 1; i < 5; i++ {
rand.Seed(time.Now().UTC().UnixNano())
genre := genres[rand.Intn(len(genres))]
attachment := slack.Attachment{
Text: fmt.Sprintf("added genre %s", genre),
}
attachments = append(attachments, attachment)
genresSeeds = append(genresSeeds, genre)
}
replyChannel.Attachments = attachments
c <- replyChannel
} else {
for i := 2; i < len(commandArray); i++ {
genresSeeds = append(genresSeeds, commandArray[i])
}
}
case "current":
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
case "genres":
if genres, err := client.GetAvailableGenreSeeds(); err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
c <- replyChannel
return
} else {
var genresBuffer bytes.Buffer
for _, val := range genres {
genresBuffer.WriteString(fmt.Sprintf("%s\n", val))
}
attachment := slack.Attachment{
Text: genresBuffer.String(),
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
case "tribute":
track, _ := client.GetTrack("2HB4bT5bvUWDbOjNpIwmNi")
uris := []spotify.URI{spotify.URI("spotify:track:2HB4bT5bvUWDbOjNpIwmNi")}
opts := &spotify.PlayOptions{URIs: uris}
client.PlayOpt(opts)
attachment := slack.Attachment{
Title: "RIP Chris Cornell",
Text: fmt.Sprintf("%s, %s", track.Album.Name, track.Name),
ImageURL: track.Album.Images[0].URL,
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
default:
attachment := slack.Attachment{
Pretext: "Command error",
Text: "I'm too smashed to play any tunes",
}
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
}
}
func getErrorAttachment(err error, txt string) []slack.Attachment {
attachment := slack.Attachment{
Pretext: "I BROKE !!",
Text: fmt.Sprintf("%s", err),
}
if txt != "" {
attachment.Text = txt
}
return []slack.Attachment{attachment}
}
func timeToPlayMusic(ch chan ReplyChannel, sc *slack.Channel) {
var replyChannel ReplyChannel
replyChannel.Channel = sc
if len(genresSeeds) == 0 {
genresSeeds = append(genresSeeds, "alternative")
}
seeds := spotify.Seeds{
Genres: genresSeeds,
//Artists: []spotify.ID{spotify.ID("5xUf6j4upBrXZPg6AI4MRK"), spotify.ID("2ziB7fzrXBoh1HUPS6sVFn")},
}
trackAttr := spotify.NewTrackAttributes()
// trackAttr.MaxEnergy(1.0)
// trackAttr.TargetEnergy(1.0)
// trackAttr.TargetDanceability(1.0)
opts := &spotify.Options{}
recs, err := client.GetRecommendations(seeds, trackAttr, opts)
// recs, err := client.Search("soundgarden", spotify.SearchTypeArtist)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
log.Printf("%+v", recs)
//playOpts := &spotify.PlayOptions{}
uris := []spotify.URI{}
ids := []spotify.ID{}
for _, val := range recs.Tracks {
trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
uris = append(uris, spotify.URI(trackURL))
ids = append(ids, val.ID)
}
// for _, val := range recs.Tracks.Tracks {
// trackURL := fmt.Sprintf("spotify:track:%s", val.ID)
// uris = append(uris, spotify.URI(trackURL))
// ids = append(ids, val.ID)
// }
err = client.ReplacePlaylistTracks(userId, playlistId, ids...)
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
//playOpts.URIs = uris
// client.PlayOpt(&spotify.PlayOptions{
// DeviceID: spotify.ID("11f1a54fa480f1559f1dd0cfdf42e9451dc17cb7"),
// URIs: []spotify.URI{spotify.URI("https://api.spotify.com/v1/tracks/3XVBdLihbNbxUwZosxcGuJ")},
// })
client.Next()
err = client.Play()
if err != nil {
replyChannel.Attachments = getErrorAttachment(err, "")
ch <- replyChannel
return
}
time.Sleep(2)
cp, _ := client.PlayerCurrentlyPlaying()
attachment := slack.Attachment{
Pretext: cp.Item.Name,
Text: cp.Item.Artists[0].Name,
Color: "#85929E",
}
replyChannel.Attachments = []slack.Attachment{attachment}
ch <- replyChannel
}
func handleStandupTimer(c chan ReplyChannel) |
func getTrackId(trackEndpoint string) string {
strs := strings.Split(trackEndpoint, "/")
return strs[len(strs)-1]
}
func handleReply() {
for {
ac := <-replyChannel
params := slack.PostMessageParameters{}
params.AsUser = true
params.Attachments = ac.Attachments
params.UnfurlMedia = true
fmt.Println("Channel: ", ac.Channel.Name)
_, _, errPostMessage := api.PostMessage(ac.Channel.Name, ac.DisplayTitle, params)
if errPostMessage != nil {
log.Fatal(errPostMessage)
}
}
}
| {
var replyChannel ReplyChannel
for {
sc := <-standupChannel
replyChannel.Channel = sc.Channel
attachment := slack.Attachment{
Pretext: "Set time engaged",
Text: fmt.Sprintf("I will blast some tunes at %s", sc.StandupTime.Format("3:04PM")),
}
setTime := sc.StandupTime.Format("15:04")
newcron := gocron.NewScheduler()
newcron.Every(1).Monday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Tuesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Wednesday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Thursday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
newcron.Every(1).Friday().At(setTime).Do(timeToPlayMusic, c, sc.Channel)
setTimes = append(setTimes, setTime)
go func() {
<-newcron.Start()
}()
replyChannel.Attachments = []slack.Attachment{attachment}
c <- replyChannel
}
} | identifier_body |
main.rs | //! Default Compute@Edge template program.
use fastly::http::{header, HeaderValue, Method, StatusCode};
use fastly::{mime, Dictionary, Error, Request, Response};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// The name of a backend server associated with this service.
///
/// This should be changed to match the name of your own backend. See the the `Hosts` section of
/// the Fastly WASM service UI for more information.
const FASTLY_API_BACKEND_NAME: &str = "fastly_api_backend";
const FASTLY_API_BASE: &str = "https://api.fastly.com";
const FASTLY_API_DATACENTER_ENDPOINT: &str = "https://api.fastly.com/datacenters";
/// The name of a second backend associated with this service.
const POP_STATUS_API_BACKEND_NAME: &str = "pop_status_backend";
const POP_STATUS_API_ENDPOINT: &str = "https://service-scraper.edgecompute.app/";
const APP_DATA_DICT: &str = "app_data";
const STATUS_VALUES: &[&str] = &[
"Operational",
"Degraded Performance",
"Partial Outage",
"Major Outage",
"Maintenance",
"Not Available",
];
#[derive(Serialize, Deserialize, Debug)]
struct Coordinates {
x: u32,
y: u32,
latitude: f64,
longitude: f64,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopData {
code: String,
name: String,
group: String,
coordinates: Coordinates,
shield: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
struct StatusData {
code: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusData {
code: String,
name: String,
latitude: f64,
longitude: f64,
group: String,
shield: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusResponse {
current_pop: String,
pop_status_data: Vec<PopStatusData>,
}
#[derive(Serialize, Deserialize, Debug)]
struct DictionaryInfo {
dictionary_id: String,
service_id: String,
item_key: String,
item_value: String,
}
/// The entry point for your application.
///
/// This function is triggered when your service receives a client request. It could be used to
/// route based on the request properties (such as method or path), send the request to a backend,
/// make completely new requests, and/or generate synthetic responses.
///
/// If `main` returns an error, a 500 error response will be delivered to the client.
#[fastly::main]
fn main(req: Request) -> Result<Response, Error> {
println!(
"Amy and the Geeks version:{}",
std::env::var("FASTLY_SERVICE_VERSION").unwrap_or_else(|_| String::new())
);
let current_pop = std::env::var("FASTLY_POP").unwrap_or_else(|_| String::new());
println!("Current:{}", current_pop);
// Filter request methods...
match req.get_method() {
// Allow GET and HEAD requests.
&Method::GET | &Method::HEAD | &Method::PUT => (),
// Accept PURGE requests; it does not matter to which backend they are sent.
m if m == "PURGE" => (),
// Deny anything else.
_ => {
return Ok(Response::from_status(StatusCode::METHOD_NOT_ALLOWED)
.with_header(header::ALLOW, "GET, HEAD")
.with_body_text_plain("This method is not allowed\n"))
}
};
let app_data_dict = Dictionary::open(APP_DATA_DICT);
let service_id = std::env::var("FASTLY_SERVICE_ID").unwrap_or_else(|_| String::new());
// We need the dictionary id for API calls.
let dict_id = app_data_dict.get("dict_id").unwrap();
let fsly_api_token = app_data_dict.get("api_key").unwrap();
let the_path = req.get_path();
println!("Path: {}", the_path);
// Pattern match on the path.
match the_path {
// If request is to the `/` path, send a default response.
"/" | "/noscrape" => {
let pop_response = Request::new(Method::GET, FASTLY_API_DATACENTER_ENDPOINT)
.with_header("Fastly-Key", &fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME)?;
let body_str = pop_response.into_body_str();
let pop_vec: Vec<PopData> = serde_json::from_str(&body_str).unwrap();
let mut status_map: Option<HashMap<&str, &str>> = None;
let status_vec: Vec<StatusData>;
if the_path != "/noscrape" {
let status_response = Request::new(Method::GET, POP_STATUS_API_ENDPOINT)
.with_header(header::ACCEPT, "application/json")
.send(POP_STATUS_API_BACKEND_NAME)?;
println!("Status response: {:?}", status_response.get_status());
let status_body_str = status_response.into_body_str();
// println!("Status body: {}", &status_body_str);
status_vec = serde_json::from_str(&status_body_str).unwrap();
status_map = Some(
status_vec
.iter()
.map(|status| (status.code.as_str(), status.status.as_str()))
.collect(),
);
}
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
let modified_pop_status_opt= get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
}
let modified_pop_status= modified_pop_status_opt.unwrap();
let modified_pop_status_map: HashMap<&str, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let pop_status_vec: Vec<PopStatusData> = pop_vec
.iter()
.map(|pop| {
let pop_code = pop.code.to_string();
let status = get_pop_status(&pop_code, &status_map, &modified_pop_status_map);
let shield = match &pop.shield {
Some(s) => s,
None => "",
};
PopStatusData {
code: pop_code,
name: pop.name.to_string(),
latitude: pop.coordinates.latitude,
longitude: pop.coordinates.longitude,
group: pop.group.to_string(),
shield: shield.to_string(),
status,
}
})
.collect();
let pop_status_response: PopStatusResponse = PopStatusResponse {
current_pop,
pop_status_data: pop_status_vec,
};
let pop_status_json = serde_json::to_string(&pop_status_response)?;
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(pop_status_json))
}
"/set_pop" => {
let modified_pop_status_opt= get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() |
let modified_pop_status= modified_pop_status_opt.unwrap();
let mut modified_pop_status_map: HashMap<String, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let query_params: Vec<(String, String)> = req.get_query().unwrap();
println!("QP: {:?}", query_params);
if query_params.is_empty() {
let response = Response::from_body(modified_pop_status)
.with_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
);
return Ok(response);
}
for (pop, status) in query_params {
if pop == "*" {
if status == "-" {
modified_pop_status_map.clear();
} else {
modified_pop_status_map
.insert("*".to_string(), status.parse::<u8>().unwrap());
}
} else {
if status == "-" {
modified_pop_status_map.remove(pop.as_str());
} else {
modified_pop_status_map.insert(pop, status.parse::<u8>().unwrap());
}
}
}
// /service/service_id/dictionary/dictionary_id/item/dictionary_item_key
let the_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
let the_body = format!(
"item_value={}",
serde_json::to_string(&modified_pop_status_map)?
);
let dict_api_response = Request::new(Method::PUT, the_url)
.with_header("Fastly-Key", fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.with_header(header::CONTENT_TYPE, "application/x-www-form-urlencoded")
.with_body(the_body)
.send(FASTLY_API_BACKEND_NAME)?;
if dict_api_response.get_status() == StatusCode::OK {
let body_str = dict_api_response.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(dict_info.item_value))
} else {
Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem mofifying dictionary\n"))
}
}
// Catch all other requests and return a 404.
_ => Ok(Response::from_status(StatusCode::NOT_FOUND)
.with_body_text_plain("The page you requested could not be found\n")),
}
}
fn get_pop_status(
pop_code: &str,
status_map: &Option<HashMap<&str, &str>>,
modified_pop_status_vec: &HashMap<&str, u8>,
) -> String {
if modified_pop_status_vec.contains_key("*") {
let pc_index = modified_pop_status_vec["*"];
if pc_index < STATUS_VALUES.len() as u8 {
STATUS_VALUES[pc_index as usize].to_string()
} else {
get_status_from_map(pop_code, status_map)
}
} else {
match modified_pop_status_vec.get(pop_code) {
Some(pc_index) => STATUS_VALUES[*pc_index as usize].to_string(),
None => get_status_from_map(pop_code, status_map),
}
}
}
fn get_status_from_map(pop_code: &str, status_map: &Option<HashMap<&str, &str>>) -> String {
match status_map {
Some(map) => match map.get(pop_code) {
Some(status) => status.parse().unwrap(),
None => "Not Available".to_string(),
},
None => "Not Available".to_string(),
}
}
// This is calling the Fastly API to get the dictionary. You might ask why I'm not just accessing
// it on the edge. Reason being to avoid a race where we read it on the edge then write it with the
// API. Still not ideal as there could be a race with another pop but it will do until we have a
// KV store
fn get_modified_pop_status(service_id: &str, dict_id: &str, api_token: &str) -> Option<String> {
let dict_item_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
let modified_pop_status_resp = Request::new(Method::GET, dict_item_url)
.with_header("Fastly-Key", api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME).unwrap();
if modified_pop_status_resp.get_status() == StatusCode::OK {
let body_str = modified_pop_status_resp.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
let modified_pop_status = dict_info.item_value;
println!("MPS: {}", modified_pop_status);
Some(modified_pop_status)
} else {
None
}
}
| {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
} | conditional_block |
main.rs | //! Default Compute@Edge template program.
use fastly::http::{header, HeaderValue, Method, StatusCode};
use fastly::{mime, Dictionary, Error, Request, Response};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// The name of a backend server associated with this service.
///
/// This should be changed to match the name of your own backend. See the the `Hosts` section of
/// the Fastly WASM service UI for more information.
const FASTLY_API_BACKEND_NAME: &str = "fastly_api_backend";
const FASTLY_API_BASE: &str = "https://api.fastly.com";
const FASTLY_API_DATACENTER_ENDPOINT: &str = "https://api.fastly.com/datacenters";
/// The name of a second backend associated with this service.
const POP_STATUS_API_BACKEND_NAME: &str = "pop_status_backend";
const POP_STATUS_API_ENDPOINT: &str = "https://service-scraper.edgecompute.app/";
const APP_DATA_DICT: &str = "app_data";
const STATUS_VALUES: &[&str] = &[
"Operational",
"Degraded Performance",
"Partial Outage",
"Major Outage",
"Maintenance",
"Not Available",
];
#[derive(Serialize, Deserialize, Debug)]
struct Coordinates {
x: u32,
y: u32,
latitude: f64,
longitude: f64,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopData {
code: String,
name: String,
group: String,
coordinates: Coordinates,
shield: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
struct StatusData {
code: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusData {
code: String,
name: String,
latitude: f64,
longitude: f64,
group: String,
shield: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct | {
current_pop: String,
pop_status_data: Vec<PopStatusData>,
}
#[derive(Serialize, Deserialize, Debug)]
struct DictionaryInfo {
dictionary_id: String,
service_id: String,
item_key: String,
item_value: String,
}
/// The entry point for your application.
///
/// This function is triggered when your service receives a client request. It could be used to
/// route based on the request properties (such as method or path), send the request to a backend,
/// make completely new requests, and/or generate synthetic responses.
///
/// If `main` returns an error, a 500 error response will be delivered to the client.
#[fastly::main]
fn main(req: Request) -> Result<Response, Error> {
println!(
"Amy and the Geeks version:{}",
std::env::var("FASTLY_SERVICE_VERSION").unwrap_or_else(|_| String::new())
);
let current_pop = std::env::var("FASTLY_POP").unwrap_or_else(|_| String::new());
println!("Current:{}", current_pop);
// Filter request methods...
match req.get_method() {
// Allow GET and HEAD requests.
&Method::GET | &Method::HEAD | &Method::PUT => (),
// Accept PURGE requests; it does not matter to which backend they are sent.
m if m == "PURGE" => (),
// Deny anything else.
_ => {
return Ok(Response::from_status(StatusCode::METHOD_NOT_ALLOWED)
.with_header(header::ALLOW, "GET, HEAD")
.with_body_text_plain("This method is not allowed\n"))
}
};
let app_data_dict = Dictionary::open(APP_DATA_DICT);
let service_id = std::env::var("FASTLY_SERVICE_ID").unwrap_or_else(|_| String::new());
// We need the dictionary id for API calls.
let dict_id = app_data_dict.get("dict_id").unwrap();
let fsly_api_token = app_data_dict.get("api_key").unwrap();
let the_path = req.get_path();
println!("Path: {}", the_path);
// Pattern match on the path.
match the_path {
// If request is to the `/` path, send a default response.
"/" | "/noscrape" => {
let pop_response = Request::new(Method::GET, FASTLY_API_DATACENTER_ENDPOINT)
.with_header("Fastly-Key", &fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME)?;
let body_str = pop_response.into_body_str();
let pop_vec: Vec<PopData> = serde_json::from_str(&body_str).unwrap();
let mut status_map: Option<HashMap<&str, &str>> = None;
let status_vec: Vec<StatusData>;
if the_path != "/noscrape" {
let status_response = Request::new(Method::GET, POP_STATUS_API_ENDPOINT)
.with_header(header::ACCEPT, "application/json")
.send(POP_STATUS_API_BACKEND_NAME)?;
println!("Status response: {:?}", status_response.get_status());
let status_body_str = status_response.into_body_str();
// println!("Status body: {}", &status_body_str);
status_vec = serde_json::from_str(&status_body_str).unwrap();
status_map = Some(
status_vec
.iter()
.map(|status| (status.code.as_str(), status.status.as_str()))
.collect(),
);
}
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
let modified_pop_status_opt= get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
}
let modified_pop_status= modified_pop_status_opt.unwrap();
let modified_pop_status_map: HashMap<&str, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let pop_status_vec: Vec<PopStatusData> = pop_vec
.iter()
.map(|pop| {
let pop_code = pop.code.to_string();
let status = get_pop_status(&pop_code, &status_map, &modified_pop_status_map);
let shield = match &pop.shield {
Some(s) => s,
None => "",
};
PopStatusData {
code: pop_code,
name: pop.name.to_string(),
latitude: pop.coordinates.latitude,
longitude: pop.coordinates.longitude,
group: pop.group.to_string(),
shield: shield.to_string(),
status,
}
})
.collect();
let pop_status_response: PopStatusResponse = PopStatusResponse {
current_pop,
pop_status_data: pop_status_vec,
};
let pop_status_json = serde_json::to_string(&pop_status_response)?;
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(pop_status_json))
}
"/set_pop" => {
let modified_pop_status_opt= get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
}
let modified_pop_status= modified_pop_status_opt.unwrap();
let mut modified_pop_status_map: HashMap<String, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let query_params: Vec<(String, String)> = req.get_query().unwrap();
println!("QP: {:?}", query_params);
if query_params.is_empty() {
let response = Response::from_body(modified_pop_status)
.with_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
);
return Ok(response);
}
for (pop, status) in query_params {
if pop == "*" {
if status == "-" {
modified_pop_status_map.clear();
} else {
modified_pop_status_map
.insert("*".to_string(), status.parse::<u8>().unwrap());
}
} else {
if status == "-" {
modified_pop_status_map.remove(pop.as_str());
} else {
modified_pop_status_map.insert(pop, status.parse::<u8>().unwrap());
}
}
}
// /service/service_id/dictionary/dictionary_id/item/dictionary_item_key
let the_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
let the_body = format!(
"item_value={}",
serde_json::to_string(&modified_pop_status_map)?
);
let dict_api_response = Request::new(Method::PUT, the_url)
.with_header("Fastly-Key", fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.with_header(header::CONTENT_TYPE, "application/x-www-form-urlencoded")
.with_body(the_body)
.send(FASTLY_API_BACKEND_NAME)?;
if dict_api_response.get_status() == StatusCode::OK {
let body_str = dict_api_response.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(dict_info.item_value))
} else {
Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem mofifying dictionary\n"))
}
}
// Catch all other requests and return a 404.
_ => Ok(Response::from_status(StatusCode::NOT_FOUND)
.with_body_text_plain("The page you requested could not be found\n")),
}
}
fn get_pop_status(
pop_code: &str,
status_map: &Option<HashMap<&str, &str>>,
modified_pop_status_vec: &HashMap<&str, u8>,
) -> String {
if modified_pop_status_vec.contains_key("*") {
let pc_index = modified_pop_status_vec["*"];
if pc_index < STATUS_VALUES.len() as u8 {
STATUS_VALUES[pc_index as usize].to_string()
} else {
get_status_from_map(pop_code, status_map)
}
} else {
match modified_pop_status_vec.get(pop_code) {
Some(pc_index) => STATUS_VALUES[*pc_index as usize].to_string(),
None => get_status_from_map(pop_code, status_map),
}
}
}
fn get_status_from_map(pop_code: &str, status_map: &Option<HashMap<&str, &str>>) -> String {
match status_map {
Some(map) => match map.get(pop_code) {
Some(status) => status.parse().unwrap(),
None => "Not Available".to_string(),
},
None => "Not Available".to_string(),
}
}
// This is calling the Fastly API to get the dictionary. You might ask why I'm not just accessing
// it on the edge. Reason being to avoid a race where we read it on the edge then write it with the
// API. Still not ideal as there could be a race with another pop but it will do until we have a
// KV store
fn get_modified_pop_status(service_id: &str, dict_id: &str, api_token: &str) -> Option<String> {
let dict_item_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
let modified_pop_status_resp = Request::new(Method::GET, dict_item_url)
.with_header("Fastly-Key", api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME).unwrap();
if modified_pop_status_resp.get_status() == StatusCode::OK {
let body_str = modified_pop_status_resp.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
let modified_pop_status = dict_info.item_value;
println!("MPS: {}", modified_pop_status);
Some(modified_pop_status)
} else {
None
}
}
| PopStatusResponse | identifier_name |
main.rs | //! Default Compute@Edge template program.
use fastly::http::{header, HeaderValue, Method, StatusCode};
use fastly::{mime, Dictionary, Error, Request, Response};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// The name of a backend server associated with this service.
///
/// This should be changed to match the name of your own backend. See the the `Hosts` section of
/// the Fastly WASM service UI for more information.
const FASTLY_API_BACKEND_NAME: &str = "fastly_api_backend";
const FASTLY_API_BASE: &str = "https://api.fastly.com";
const FASTLY_API_DATACENTER_ENDPOINT: &str = "https://api.fastly.com/datacenters";
/// The name of a second backend associated with this service.
const POP_STATUS_API_BACKEND_NAME: &str = "pop_status_backend";
const POP_STATUS_API_ENDPOINT: &str = "https://service-scraper.edgecompute.app/";
const APP_DATA_DICT: &str = "app_data";
const STATUS_VALUES: &[&str] = &[
"Operational",
"Degraded Performance",
"Partial Outage",
"Major Outage",
"Maintenance",
"Not Available",
];
#[derive(Serialize, Deserialize, Debug)]
struct Coordinates {
x: u32,
y: u32,
latitude: f64,
longitude: f64,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopData {
code: String,
name: String,
group: String,
coordinates: Coordinates,
shield: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
struct StatusData {
code: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusData {
code: String,
name: String,
latitude: f64,
longitude: f64,
group: String,
shield: String,
status: String,
}
/// Top-level JSON response: the POP serving this request plus the status of every POP.
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusResponse {
    current_pop: String,
    pop_status_data: Vec<PopStatusData>,
}
/// Subset of the Fastly dictionary-item API response that this service reads.
#[derive(Serialize, Deserialize, Debug)]
struct DictionaryInfo {
    dictionary_id: String,
    service_id: String,
    item_key: String,
    // JSON-encoded map of POP-code overrides (see get_modified_pop_status).
    item_value: String,
}
/// The entry point for your application.
///
/// This function is triggered when your service receives a client request. It could be used to
/// route based on the request properties (such as method or path), send the request to a backend,
/// make completely new requests, and/or generate synthetic responses.
///
/// If `main` returns an error, a 500 error response will be delivered to the client.
/// The entry point for your application.
///
/// This function is triggered when your service receives a client request. It could be used to
/// route based on the request properties (such as method or path), send the request to a backend,
/// make completely new requests, and/or generate synthetic responses.
///
/// Routes:
/// * `/`         — combine datacenter metadata, scraped status, and manual overrides.
/// * `/noscrape` — same, but skip the status-scraper backend call.
/// * `/set_pop`  — read or update the manual override dictionary via the Fastly API.
///
/// If `main` returns an error, a 500 error response will be delivered to the client.
#[fastly::main]
fn main(req: Request) -> Result<Response, Error> {
    println!(
        "Amy and the Geeks version:{}",
        std::env::var("FASTLY_SERVICE_VERSION").unwrap_or_else(|_| String::new())
    );
    // POP currently serving this request; echoed back in the JSON payload.
    let current_pop = std::env::var("FASTLY_POP").unwrap_or_else(|_| String::new());
    println!("Current:{}", current_pop);
    // Filter request methods...
    match req.get_method() {
        // Allow GET, HEAD and PUT requests.
        // NOTE(review): the ALLOW header below only advertises "GET, HEAD";
        // confirm PUT is meant to be accepted here.
        &Method::GET | &Method::HEAD | &Method::PUT => (),
        // Accept PURGE requests; it does not matter to which backend they are sent.
        m if m == "PURGE" => (),
        // Deny anything else.
        _ => {
            return Ok(Response::from_status(StatusCode::METHOD_NOT_ALLOWED)
                .with_header(header::ALLOW, "GET, HEAD")
                .with_body_text_plain("This method is not allowed\n"))
        }
    };
    // Edge dictionary holding the dictionary id and API token for Fastly API calls.
    let app_data_dict = Dictionary::open(APP_DATA_DICT);
    let service_id = std::env::var("FASTLY_SERVICE_ID").unwrap_or_else(|_| String::new());
    // We need the dictionary id for API calls.
    let dict_id = app_data_dict.get("dict_id").unwrap();
    let fsly_api_token = app_data_dict.get("api_key").unwrap();
    let the_path = req.get_path();
    println!("Path: {}", the_path);
    // Pattern match on the path.
    match the_path {
        // If request is to the `/` path, send a default response.
        "/" | "/noscrape" => {
            // Fetch the full POP list from the Fastly datacenters API.
            let pop_response = Request::new(Method::GET, FASTLY_API_DATACENTER_ENDPOINT)
                .with_header("Fastly-Key", &fsly_api_token)
                .with_header(header::ACCEPT, "application/json")
                .send(FASTLY_API_BACKEND_NAME)?;
            let body_str = pop_response.into_body_str();
            let pop_vec: Vec<PopData> = serde_json::from_str(&body_str).unwrap();
            let mut status_map: Option<HashMap<&str, &str>> = None;
            let status_vec: Vec<StatusData>;
            if the_path != "/noscrape" {
                // Scrape live status; `/noscrape` skips this and leaves status_map as None.
                let status_response = Request::new(Method::GET, POP_STATUS_API_ENDPOINT)
                    .with_header(header::ACCEPT, "application/json")
                    .send(POP_STATUS_API_BACKEND_NAME)?;
                println!("Status response: {:?}", status_response.get_status());
                let status_body_str = status_response.into_body_str();
                status_vec = serde_json::from_str(&status_body_str).unwrap();
                status_map = Some(
                    status_vec
                        .iter()
                        .map(|status| (status.code.as_str(), status.status.as_str()))
                        .collect(),
                );
            }
            // Manual overrides are stored as JSON in the edge dictionary, read via the
            // API (see get_modified_pop_status for why it is not read on the edge).
            let modified_pop_status_opt =
                get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
            if modified_pop_status_opt.is_none() {
                return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
                    .with_body_text_plain("Problem accessing API\n"));
            }
            let modified_pop_status = modified_pop_status_opt.unwrap();
            let modified_pop_status_map: HashMap<&str, u8> =
                serde_json::from_str(modified_pop_status.as_str()).unwrap();
            let pop_status_vec: Vec<PopStatusData> = pop_vec
                .iter()
                .map(|pop| {
                    let pop_code = pop.code.to_string();
                    let status = get_pop_status(&pop_code, &status_map, &modified_pop_status_map);
                    let shield = match &pop.shield {
                        Some(s) => s,
                        None => "",
                    };
                    // Restored the latitude/longitude fields that had been dropped
                    // from the struct literal in this copy of the source.
                    PopStatusData {
                        code: pop_code,
                        name: pop.name.to_string(),
                        latitude: pop.coordinates.latitude,
                        longitude: pop.coordinates.longitude,
                        group: pop.group.to_string(),
                        shield: shield.to_string(),
                        status,
                    }
                })
                .collect();
            let pop_status_response: PopStatusResponse = PopStatusResponse {
                current_pop,
                pop_status_data: pop_status_vec,
            };
            let pop_status_json = serde_json::to_string(&pop_status_response)?;
            Ok(Response::from_status(StatusCode::OK)
                .with_content_type(mime::APPLICATION_JSON)
                .with_header(
                    &header::ACCESS_CONTROL_ALLOW_ORIGIN,
                    &HeaderValue::from_static("*"),
                )
                .with_body(pop_status_json))
        }
        "/set_pop" => {
            // Current override map, fetched via the API to avoid an edge/API write race.
            let modified_pop_status_opt =
                get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
            if modified_pop_status_opt.is_none() {
                return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
                    .with_body_text_plain("Problem accessing API\n"));
            }
            let modified_pop_status = modified_pop_status_opt.unwrap();
            let mut modified_pop_status_map: HashMap<String, u8> =
                serde_json::from_str(modified_pop_status.as_str()).unwrap();
            let query_params: Vec<(String, String)> = req.get_query().unwrap();
            println!("QP: {:?}", query_params);
            // No query parameters: just report the current overrides.
            if query_params.is_empty() {
                let response = Response::from_body(modified_pop_status)
                    .with_status(StatusCode::OK)
                    .with_content_type(mime::APPLICATION_JSON)
                    .with_header(
                        &header::ACCESS_CONTROL_ALLOW_ORIGIN,
                        &HeaderValue::from_static("*"),
                    );
                return Ok(response);
            }
            // Apply each pop=status pair: "*" targets every POP, a status of "-"
            // removes the override ("*=-" clears the whole map).
            for (pop, status) in query_params {
                if pop == "*" {
                    if status == "-" {
                        modified_pop_status_map.clear();
                    } else {
                        modified_pop_status_map
                            .insert("*".to_string(), status.parse::<u8>().unwrap());
                    }
                } else {
                    if status == "-" {
                        modified_pop_status_map.remove(pop.as_str());
                    } else {
                        modified_pop_status_map.insert(pop, status.parse::<u8>().unwrap());
                    }
                }
            }
            // /service/service_id/dictionary/dictionary_id/item/dictionary_item_key
            let the_url = format!(
                "{}/service/{}/dictionary/{}/item/modified_pop_status",
                FASTLY_API_BASE, service_id, dict_id
            );
            let the_body = format!(
                "item_value={}",
                serde_json::to_string(&modified_pop_status_map)?
            );
            // Persist the updated overrides back to the edge dictionary.
            let dict_api_response = Request::new(Method::PUT, the_url)
                .with_header("Fastly-Key", fsly_api_token)
                .with_header(header::ACCEPT, "application/json")
                .with_header(header::CONTENT_TYPE, "application/x-www-form-urlencoded")
                .with_body(the_body)
                .send(FASTLY_API_BACKEND_NAME)?;
            if dict_api_response.get_status() == StatusCode::OK {
                let body_str = dict_api_response.into_body_str();
                let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
                Ok(Response::from_status(StatusCode::OK)
                    .with_content_type(mime::APPLICATION_JSON)
                    .with_header(
                        &header::ACCESS_CONTROL_ALLOW_ORIGIN,
                        &HeaderValue::from_static("*"),
                    )
                    .with_body(dict_info.item_value))
            } else {
                // Typo fix in user-facing message: "mofifying" -> "modifying".
                Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
                    .with_body_text_plain("Problem modifying dictionary\n"))
            }
        }
        // Catch all other requests and return a 404.
        _ => Ok(Response::from_status(StatusCode::NOT_FOUND)
            .with_body_text_plain("The page you requested could not be found\n")),
    }
}
/// Resolve the display status for a single POP.
///
/// Precedence: a wildcard ("*") override applies to every POP; otherwise a
/// per-POP override is used; otherwise we fall back to the scraped status map.
/// An override index outside `STATUS_VALUES` is treated as "no override".
fn get_pop_status(
    pop_code: &str,
    status_map: &Option<HashMap<&str, &str>>,
    overrides: &HashMap<&str, u8>,
) -> String {
    // A "*" entry overrides the status of every POP.
    if let Some(&idx) = overrides.get("*") {
        if let Some(status) = STATUS_VALUES.get(idx as usize) {
            return status.to_string();
        }
        // Out-of-range wildcard override: fall through to the scraped data.
        return get_status_from_map(pop_code, status_map);
    }
    match overrides.get(pop_code) {
        // Bounds-checked lookup: the original indexed STATUS_VALUES unchecked here
        // (a panic path), inconsistent with the checked "*" branch above.
        Some(&idx) => STATUS_VALUES
            .get(idx as usize)
            .map(|s| s.to_string())
            .unwrap_or_else(|| get_status_from_map(pop_code, status_map)),
        None => get_status_from_map(pop_code, status_map),
    }
}
/// Look up `pop_code` in the scraped status map, defaulting to "Not Available"
/// when the map is absent or the POP is unknown.
fn get_status_from_map(pop_code: &str, status_map: &Option<HashMap<&str, &str>>) -> String {
    match status_map {
        // `to_string` replaces the original `parse().unwrap()`, which was an
        // infallible &str -> String round-trip through FromStr — same result,
        // clearer intent, no unwrap.
        Some(map) => map
            .get(pop_code)
            .map(|status| status.to_string())
            .unwrap_or_else(|| "Not Available".to_string()),
        None => "Not Available".to_string(),
    }
}
// This is calling the Fastly API to get the dictionary. You might ask why I'm not just accessing
// it on the edge. Reason being to avoid a race where we read it on the edge then write it with the
// API. Still not ideal as there could be a race with another pop but it will do until we have a
// KV store
fn get_modified_pop_status(service_id: &str, dict_id: &str, api_token: &str) -> Option<String> {
let dict_item_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
let modified_pop_status_resp = Request::new(Method::GET, dict_item_url)
.with_header("Fastly-Key", api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME).unwrap();
if modified_pop_status_resp.get_status() == StatusCode::OK {
let body_str = modified_pop_status_resp.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
let modified_pop_status = dict_info.item_value;
println!("MPS: {}", modified_pop_status);
Some(modified_pop_status)
} else {
None
}
} | latitude: pop.coordinates.latitude,
longitude: pop.coordinates.longitude, | random_line_split |
1-built-in-function.py | # Python Built-in Function
# The Python interpreter has a number of functions that are always available for use. These functions are called built-in functions. For example, print() function prints the given object to the standard output device (screen) or to the text stream file.
# !MAP()
# map() is a built-in Python function that takes in two or more arguments: a function and one or more iterables, in the form:
# map(function, iterable, ...)
# map() returns an iterator - that is, map() returns a special object that yields one result at a time as needed. We will learn more about iterators and generators in a future lecture. For now, since our examples are so small, we will cast map() as a list to see the results immediately.
# When we went over list comprehensions we created a small expression to convert Celsius to Fahrenheit. Let's do the same here but use map:
# In [1]:
# def fahrenheit(celsius):
# return (9/5)*celsius + 32
# temps = [0, 22.5, 40, 100]
# Now let's see map() in action:
# In [2]:
# F_temps = map(fahrenheit, temps)
# #Show
# list(F_temps)
# Out[2]:
# [32.0, 72.5, 104.0, 212.0]
# In the example above, map() applies the fahrenheit function to every item in temps. However, we don't have to define our functions beforehand; we can use a lambda expression instead:
# In [3]:
# list(map(lambda x: (9/5)*x + 32, temps))
# Out[3]:
# [32.0, 72.5, 104.0, 212.0]
# Great! We got the same result! Using map with lambda expressions is much more common since the entire purpose of map() is to save effort on having to create manual for loops.
# map() with multiple iterables
# map() can accept more than one iterable. The iterables should be the same length - in the event that they are not, map() will stop as soon as the shortest iterable is exhausted.
# For instance, if our function is trying to add two values x and y, we can pass a list of x values and another list of y values to map(). The function (or lambda) will be fed the 0th index from each list, and then the 1st index, and so on until the n-th index is reached.
# Let's see this in action with two and then three lists:
# In [4]:
# a = [1,2,3,4]
# b = [5,6,7,8]
# c = [9,10,11,12]
# list(map(lambda x,y:x+y,a,b))
# Out[4]:
# [6, 8, 10, 12]
# In [5]:
# # Now all three lists
# list(map(lambda x,y,z:x+y+z,a,b,c))
# Out[5]:
# [15, 18, 21, 24]
# We can see in the example above that the parameter x gets its values from the list a, while y gets its values from b and z from list c. Go ahead and play with your own example to make sure you fully understand mapping to more than one iterable.
# Great job! You should now have a basic understanding of the map() function.
# !REDUCE()
# reduce()
# Many times students have difficulty understanding reduce() so pay careful attention to this lecture. The function reduce(function, sequence) continually applies the function to the sequence. It then returns a single value.
# If seq = [ s1, s2, s3, ... , sn ], calling reduce(function, sequence) works like this:
# At first the first two elements of seq will be applied to function, i.e. func(s1,s2)
# The list on which reduce() works looks now like this: [ function(s1, s2), s3, ... , sn ]
# In the next step the function will be applied on the previous result and the third element of the list, i.e. function(function(s1, s2),s3)
# The list looks like this now: [ function(function(s1, s2),s3), ... , sn ]
# It continues like this until just one element is left and return this element as the result of reduce()
# Let's see an example:
# In [1]:
# from functools import reduce
# lst =[47,11,42,13]
# reduce(lambda x,y: x+y,lst)
# Out[1]:
# 113
# Lets look at a diagram to get a better understanding of what is going on here:
# In [2]:
# from IPython.display import Image
# Image('http://www.python-course.eu/images/reduce_diagram.png')
# Out[2]:
# Note how we keep reducing the sequence until a single final value is obtained. Lets see another example:
# In [3]:
# #Find the maximum of a sequence (This already exists as max())
# max_find = lambda a,b: a if (a > b) else b
# In [4]:
# #Find max
# reduce(max_find,lst)
# Out[4]:
# 47
# Hopefully you can see how useful reduce can be in various situations. Keep it in mind as you think about your code projects!
# !FILTER()
# filter
# The function filter(function, list) offers a convenient way to filter out all the elements of an iterable, for which the function returns True.
# The function filter(function,list) needs a function as its first argument. The function needs to return a Boolean value (either True or False). This function will be applied to every element of the iterable. Only if the function returns True will the element of the iterable be included in the result.
# Like map(), filter() returns an iterator - that is, filter yields one result at a time as needed. Iterators and generators will be covered in an upcoming lecture. For now, since our examples are so small, we will cast filter() as a list to see our results immediately.
# Let's see some examples:
# In [1]:
# #First let's make a function
# def even_check(num):
# if num%2 ==0:
# return True
# Now let's filter a list of numbers. Note: putting the function into filter without any parentheses might feel strange, but keep in mind that functions are objects as well.
# In [2]:
# lst =range(20)
# list(filter(even_check,lst))
# Out[2]:
# [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
# filter() is more commonly used with lambda functions, because we usually use filter for a quick job where we don't want to write an entire function. Let's repeat the example above using a lambda expression:
# In [3]:
# list(filter(lambda x: x%2==0,lst))
# Out[3]:
# [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
# Great! You should now have a solid understanding of filter() and how to apply it to your code!
# !ZIP()
# zip
# zip() makes an iterator that aggregates elements from each of the iterables.
# Returns an iterator of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables. The iterator stops when the shortest input iterable is exhausted. With a single iterable argument, it returns an iterator of 1-tuples. With no arguments, it returns an empty iterator.
# zip() is equivalent to:
# def zip(*iterables):
# # zip('ABCD', 'xy') --> Ax By
# sentinel = object()
# iterators = [iter(it) for it in iterables]
# while iterators:
# result = []
# for it in iterators:
# elem = next(it, sentinel)
# if elem is sentinel:
# return
#             result.append(elem)
#         yield tuple(result)
# zip() should only be used with unequal length inputs when you don’t care about trailing, unmatched values from the longer iterables.
# Let's see it in action in some examples:
# Examples
# In [1]:
# x = [1,2,3]
# y = [4,5,6]
# # Zip the lists together
# list(zip(x,y))
# Out[1]:
# [(1, 4), (2, 5), (3, 6)]
# Note how tuples are returned. What if one iterable is longer than the other?
# In [2]:
# x = [1,2,3]
# y = [4,5,6,7,8]
# # Zip the lists together
# list(zip(x,y))
# Out[2]:
# [(1, 4), (2, 5), (3, 6)]
# Note how the zip is defined by the shortest iterable length. It's generally advised not to zip unequal-length iterables unless you're very sure you only need partial tuple pairings.
# What happens if we try to zip together dictionaries?
# In [3]:
# d1 = {'a':1,'b':2}
# d2 = {'c':4,'d':5}
# list(zip(d1,d2))
# Out[3]:
# [('a', 'c'), ('b', 'd')]
# This makes sense because simply iterating through the dictionaries will result in just the keys. We would have to call methods to mix keys and values:
# In [4]:
# list(zip(d2,d1.values()))
# Out[4]:
# [('c', 1), ('d', 2)]
# Great! Finally lets use zip() to switch the keys and values of the two dictionaries:
# In [5]:
# def switcharoo(d1,d2):
# dout = {}
# for d1key,d2val in zip(d1,d2.values()):
# dout[d1key] = d2val
# return dout
# In [6]:
# switcharoo(d1,d2)
# Out[6]:
# {'a': 4, 'b': 5}
# Great! You can use zip to save a lot of typing in many situations! You should now have a good understanding of zip() and some possible use cases.
# !ENUMERATE()
# enumerate()
# In this lecture we will learn about an extremely useful built-in function: enumerate(). Enumerate allows you to keep a count as you iterate through an object. It does this by returning a tuple in the form (count,element). The function itself is equivalent to:
# def enumerate(sequence, start=0):
# n = start
# for elem in sequence:
# yield n, elem
# n += 1
# Example
# In [1]:
# lst = ['a','b','c']
# for number,item in enumerate(lst):
# print(number)
# print(item)
# 0
# a
# 1
# b
# 2
# c
# enumerate() becomes particularly useful when you have a case where you need to have some sort of tracker. For example:
# In [2]:
# for count,item in enumerate(lst):
# if count >= 2:
# break
# else:
# print(item)
# a
# b
# enumerate() takes an optional "start" argument to override the default value of zero:
# In [3]:
# months = ['March','April','May','June']
# list(enumerate(months,start=3))
# Out[3]:
# [(3, 'March'), (4, 'April'), (5, 'May'), (6, 'June')]
# Great! You should now have a good understanding of enumerate and its potential use cases.
#! ALL() and ANY
# all() and any()
# all() and any() are built-in functions in Python that allow us to conveniently check for boolean matching in an iterable. all() will return True if all elements in an iterable are True. It is the same as this function code:
# def all(iterable):
# for element in iterable:
# if not element:
# return False
# return True
# any() will return True if any of the elements in the iterable are True. It is equivalent to the following function code:
# def any(iterable):
# for element in iterable:
# if element:
# return True
# return False
# Let's see a few examples of these functions. They should be fairly straightforward:
# In [1]:
# lst = [True,True,False,True]
# In [2]:
# all(lst)
# Out[2]:
# False
# Returns False because not all elements are True.
# In [3]:
# any(lst)
# Out[3]:
# True
# Returns True because at least one of the elements in the list is True
# There you have it, you should have an understanding of how to use any() and all() in your code.
#! COMPLEX()
# complex()
# complex() returns a complex number with the value real + imag*1j or converts a string or number to a complex number.
# If the first parameter is a string, it will be interpreted as a complex number and the function must be called without a second parameter. The second parameter can never be a string. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the constructor serves as a numeric conversion like int and float. If both arguments are omitted, returns 0j.
# If you are doing math or engineering that requires complex numbers (such as dynamics, control systems, or impedance of a circuit) this is a useful tool to have in Python.
# Let's see some examples:
# In [1]:
# # Create 2+3j
# complex(2,3)
# Out[1]:
# (2+3j)
# In [2]:
# complex(10,1)
# Out[2]:
# (10+1j)
# We can also pass strings:
# In [3]:
# complex('12+2j')
# Out[3]:
# (12+2j)
# That's really all there is to this useful function. Keep it in mind if you are ever dealing with complex numbers in Python!
#!END OF UDEMY========================================
# In Python 3.6 (latest version), there are 68 built-in functions. They are listed below alphabetically along with brief description.
# Search Built-in Method
# Method Description
# Python abs() returns absolute value of a number
# Python all() returns true when all elements in iterable is true
# Python any() Checks if any Element of an Iterable is True
# Python ascii() Returns String Containing Printable Representation
# Python bin() converts integer to binary string
# Python bool() Converts a Value to Boolean
# Python bytearray() returns array of given byte size
# Python bytes() returns immutable bytes object
# Python callable() Checks if the Object is Callable
# Python chr() Returns a Character (a string) from an Integer
# Python classmethod() returns class method for given function
# Python compile() Returns a Python code object
# Python complex() Creates a Complex Number
# Python delattr() Deletes Attribute From the Object
# Python dict() Creates a Dictionary
# Python dir() Tries to Return Attributes of Object
# Python divmod() Returns a Tuple of Quotient and Remainder
# Python enumerate() Returns an Enumerate Object
# Python eval() Runs Python Code Within Program
# Python exec() Executes Dynamically Created Program
# Python filter() constructs iterator from elements which are true
# Python float() returns floating point number from number, string
# Python format() returns formatted representation of a value
# Python frozenset() returns immutable frozenset object
# Python getattr() returns value of named attribute of an object
# Python globals() returns dictionary of current global symbol table
# Python hasattr() returns whether object has named attribute
# Python hash() returns hash value of an object
# Python help() Invokes the built-in Help System
# Python hex() Converts to Integer to Hexadecimal
# Python id() Returns Identify of an Object
# Python input() reads and returns a line of string
# Python int() returns integer from a number or string
# Python isinstance() Checks if a Object is an Instance of Class
# Python issubclass() Checks if a Object is Subclass of a Class
# Python iter() returns iterator for an object
# Python len() Returns Length of an Object
# Python list() Function creates list in Python
# Python locals() Returns dictionary of a current local symbol table
# Python map() Applies Function and Returns a List
# Python max() returns largest element
# Python memoryview() returns memory view of an argument
# Python min() returns smallest element
# Python next() Retrieves Next Element from Iterator
# Python object() Creates a Featureless Object
# Python oct() converts integer to octal
# Python open() Returns a File object
# Python ord() returns Unicode code point for Unicode character
# Python pow() returns x to the power of y
# Python print() Prints the Given Object
# Python property() returns a property attribute
# Python range() return sequence of integers between start and stop
# Python repr() returns printable representation of an object
# Python reversed() returns reversed iterator of a sequence
# Python round() rounds a floating point number to ndigits places.
# Python set() returns a Python set
# Python setattr() sets value of an attribute of object
# Python slice() creates a slice object specified by range()
# Python sorted() returns sorted list from a given iterable
# Python staticmethod() creates static method from a function
# Python str() returns informal representation of an object
# Python sum() Add items of an Iterable
# Python super() Allow you to Refer Parent Class by super
# Python tuple() Function Creates a Tuple
# Python type() Returns Type of an Object
# Python vars() Returns __dict__ attribute of a class
# Python zip() Returns an Iterator of Tuples
# Python __import__() Advanced Function Called by import | # yield tuple(result)
| random_line_split |
merge_registry_pr.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Registry.
# usage: ./merge_registry_pr.py (see config env vars below)
#
# This script is inspired by Spark merge script and also borrow some codes from Kafka.
#
# This utility assumes you already have a local Registry git folder and that you
# have added remotes corresponding to both (i) the pull repository (which pull requests are available)
# and (ii) the push repository.
import json
import os
import subprocess
import sys
import urllib2
# Location of your Registry git development area
REGISTRY_HOME = os.environ.get("REGISTRY_HOME", os.getcwd())
# Remote name which points to pull repository
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "pull-repo")
# Remote name which points to push repository
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "push-repo")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
# Web and API URLs for the hortonworks/registry repository.
GITHUB_BASE = "https://github.com/hortonworks/registry/pull"
GITHUB_API_BASE = "https://api.github.com/repos/hortonworks/registry"
# Prefix added to temporary branches; clean_up() deletes every local branch
# whose name starts with this prefix.
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
    """GET `url` from the GitHub API and return the parsed JSON payload.

    Adds an OAuth token header when GITHUB_OAUTH_KEY is configured. On an
    HTTPError this does not return: it calls fail(), which exits the process.
    """
    try:
        request = urllib2.Request(url)
        if GITHUB_OAUTH_KEY:
            request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
        return json.load(urllib2.urlopen(request))
    except urllib2.HTTPError as e:
        # Distinguish rate limiting from other HTTP failures so the user gets
        # actionable advice about configuring an OAuth token.
        if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
            message = "Exceeded the GitHub API rate limit; see the instructions in " +\
                      "dev/merge_registry_pr.py to configure an OAuth token for making authenticated " +\
                      "GitHub requests."
        else:
            message = "Unable to fetch URL, exiting: %s" % url
        fail(message)
def fail(msg):
    """Print `msg`, restore the original git state via clean_up(), and exit non-zero."""
    print(msg)
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    """Echo and run `cmd` (an argv list or a space-separated string) and return its output.

    On a non-zero exit status the failure details are printed and the
    CalledProcessError is re-raised for the caller to handle.
    """
    print(cmd)
    argv = cmd if isinstance(cmd, list) else cmd.split(" ")
    try:
        return subprocess.check_output(argv)
    except subprocess.CalledProcessError as e:
        print("CallProcessError occurred. More information for process is below...")
        print("Output - %s" % e.output)
        print("Return code - %d" % e.returncode)
        raise
def continue_maybe(prompt):
    """Ask the user a y/n question; any answer other than "y" aborts via fail().

    The function name was missing in this copy of the source; every call site
    in the file invokes `continue_maybe`, so that name is restored here.
    """
    result = raw_input("\n%s (y/n): " % prompt)
    if result.lower() != "y":
        fail("Okay, exiting")
def clean_up():
    """Check out the ref recorded in `original_head` and delete temporary branches.

    Any local branch whose name starts with BRANCH_PREFIX is assumed to have
    been created by this tool and is force-deleted.
    """
    print("Restoring head pointer to %s" % original_head)
    run_cmd("git checkout %s" % original_head)
    # `git branch` output is whitespace-padded (and '*'-marked for the current
    # branch); stripping spaces lets the startswith() filter below match.
    branches = run_cmd("git branch").replace(" ", "").split("\n")
    for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
        print("Deleting local branch %s" % branch)
        run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc):
    """Squash-merge pull request `pr_num` into `target_ref` and push the result.

    Fetches the PR head and the target branch into temporary PR_TOOL branches,
    squashes the PR's commits into a single commit (single-author PRs only),
    builds the commit message (title, body, author, reviewers, optional commit
    list), then pushes to PUSH_REMOTE_NAME and cleans up. Interactive: prompts
    before pushing and on merge conflicts. Returns the short (8-char) merge hash.
    """
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                       '--pretty=format:%h [%an] %s']).split("\n")
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                              '--pretty=format:%an <%ae>']).split("\n")
    # Authors ordered by commit count, most frequent first.
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    print("Information of commits: %s" % (commits,))
    if len(distinct_authors) > 1:
        fail("We don't allow squashing commits which have multiple authors. You need to handle the merge manually. authors: %s" % (distinct_authors,))
    primary_author = distinct_authors[0]
    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Give the user a chance to resolve conflicts by hand before continuing.
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True
    merge_message_flags = []
    merge_message_flags += ["-m", title]
    if body is not None:
        # We remove @ symbols from the body to avoid triggering e-mails
        # to people every time someone creates a public fork of Registry.
        merge_message_flags += ["-m", body.replace("@", "")]
    merge_message_flags += ["-m", "Author: %s" % primary_author]
    if len(reviewers) > 0:
        merge_message_flags += ["-m", "Reviewers: %s" % ",".join(reviewers)]
    if had_conflicts:
        # Record who resolved the conflicts in the commit message.
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]
    if len(commits) > 1:
        # Only offer to list the squashed commits when there is more than one.
        result = raw_input("List pull request commits in squashed commit message? (y/n) [n]: ")
        if result.lower() == "y":
            should_list_commits = True
        else:
            should_list_commits = False
    else:
        should_list_commits = False
    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    close_line = "Closes #%s from %s" % (pr_num, pr_repo_desc)
    if should_list_commits:
        close_line += " and squashes the following commits:"
    merge_message_flags += ["-m", close_line]
    if should_list_commits:
        merge_message_flags += ["-m", "\n".join(commits)]
    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
    result = raw_input("Merge complete (local ref %s). Push to %s? (y/n)" %
                       (target_branch_name, PUSH_REMOTE_NAME))
    if result.lower() != "y":
        # Expert escape hatch: keep the squashed-but-unpushed state for inspection.
        result = raw_input("Exiting. Do you want to keep the current state? (expert only) (y/n)")
        if result.lower() != "y":
            fail("Okay, exiting")
        else:
            print("Okay, exiting without cleaning up.")
            sys.exit(0)
    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)
    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def get_current_ref():
    """Return the current branch name, or the commit SHA when HEAD is detached."""
    symbolic = run_cmd("git rev-parse --abbrev-ref HEAD").strip()
    if symbolic != 'HEAD':
        return symbolic
    # Detached HEAD has no branch name, so fall back to the commit SHA.
    return run_cmd("git rev-parse HEAD").strip()
def get_remotes():
    """Return the set of configured git remote names for the current repo."""
    lines = run_cmd("git remote -v").strip().split("\n")
    # Each `git remote -v` line is "<name>\t<url> (<direction>)"; keep the name.
    return set(line.split("\t")[0] for line in lines)
def main():
    """Interactively merge one GitHub pull request into the Registry repo.

    Flow: verify both remotes exist, fetch the PR metadata from the GitHub
    API, let the operator confirm/adjust the commit title, sanity-check the
    PR state and mergeability, then hand off to merge_pr(). Aborts (via
    fail/continue_maybe) exit the whole script.
    """
    # clean_up() reads original_head to restore the checkout on exit.
    global original_head
    os.chdir(REGISTRY_HOME)
    original_head = get_current_ref()
    remotes = get_remotes()
    if not PR_REMOTE_NAME in remotes:
        fail("Remote for pull request [%s] not registered" % PR_REMOTE_NAME)
    if not PUSH_REMOTE_NAME in remotes:
        fail("Remote for push [%s] not registered" % PUSH_REMOTE_NAME)
    pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
    pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
    url = pr["url"]
    print("The title of PR: %s" % pr["title"])
    # Project convention: commit titles start with an "ISSUE-" reference.
    # Offer the operator a chance to rewrite a non-conforming title.
    if not pr["title"].startswith("ISSUE-"):
        print("The title of PR doesn't conform to the Registry rule: doesn't start with 'ISSUE-'")
        continue_maybe("Continue merging?")
        result = raw_input("Do you want to modify the title before continue? (y/n): ")
        if result.lower() == "y":
            title_str = raw_input("New title: ")
            title = title_str.strip()
        else:
            print("OK. Will use PR's title.")
            title = pr["title"]
    else:
        title = pr["title"]
    body = pr["body"]
    if not body or len(body.strip()) <= 0:
        print("WARN: The body of PR doesn't have any content which should have information of PR.")
        print("If you continue merging, commit title is available but commit message may be empty.")
        continue_maybe("Do you want to continue?")
    target_ref = pr["base"]["ref"]
    user_login = pr["user"]["login"]
    base_ref = pr["head"]["ref"]
    pr_repo_desc = "%s/%s" % (user_login, base_ref)
    if pr["state"] != "open":
        fail("The state of PR is not 'open'. We don't want to deal with closed PR.")
    if not bool(pr["mergeable"]):
        msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
              "Continue? (experts only!)"
        continue_maybe(msg)
    print("\n=== Pull Request #%s ===" % pr_num)
    print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" %
          (title, pr_repo_desc, target_ref, url))
    continue_maybe("Proceed with merging pull request #%s?" % pr_num)
    # Credit everyone who left a review on the PR in the commit message.
    review_comments = get_json("%s/pulls/%s/reviews" % (GITHUB_API_BASE, pr_num))
    reviewers = set(map(lambda x: "@" + x["user"]["login"], review_comments))
    merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc)
if __name__ == "__main__":
    try:
        main()
    except SystemExit:
        # don't clean_up() while receiving SystemExit: the situation might be normal exit
        pass
    except:
        # Unexpected failure: restore the original checkout and delete the
        # temporary PR_TOOL branches, then re-raise for a full traceback.
        clean_up()
        raise
| continue_maybe | identifier_name |
merge_registry_pr.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Registry.
# usage: ./merge_registry_pr.py (see config env vars below)
#
# This script is inspired by Spark merge script and also borrow some codes from Kafka.
#
# This utility assumes you already have a local Registry git folder and that you
# have added remotes corresponding to both (i) the pull repository (which pull requests are available)
# and (ii) the push repository.
import json
import os
import subprocess
import sys
import urllib2
# Location of your Registry git development area
REGISTRY_HOME = os.environ.get("REGISTRY_HOME", os.getcwd())
# Remote name which points to pull repository
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "pull-repo")
# Remote name which points to push repository
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "push-repo")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/hortonworks/registry/pull"
GITHUB_API_BASE = "https://api.github.com/repos/hortonworks/registry"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
    """Fetch `url` from the GitHub API and return the decoded JSON payload.

    Sends the OAuth token header when GITHUB_OAUTH_KEY is configured.
    On an HTTP error this function never returns normally: it reports the
    problem and exits the script via fail().
    """
    try:
        request = urllib2.Request(url)
        if GITHUB_OAUTH_KEY:
            request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
        return json.load(urllib2.urlopen(request))
    except urllib2.HTTPError as e:
        # A zero X-RateLimit-Remaining header means we were throttled rather
        # than that the URL is bad; point the user at the OAuth setup notes.
        if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
            message = "Exceeded the GitHub API rate limit; see the instructions in " +\
                      "dev/merge_registry_pr.py to configure an OAuth token for making authenticated " +\
                      "GitHub requests."
        else:
            message = "Unable to fetch URL, exiting: %s" % url
        fail(message)
def fail(msg):
    """Print `msg`, restore the repo via clean_up(), and exit with status -1."""
    print(msg)
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    """Run a command and return its captured stdout.

    `cmd` may be an argv list or a single string; a string is naively split
    on spaces, so arguments containing spaces must be passed as a list.
    Raises subprocess.CalledProcessError on a non-zero exit status, after
    printing the child's output and return code.
    """
    print(cmd)  # echo the command so the operator can follow along
    try:
        if isinstance(cmd, list):
            return subprocess.check_output(cmd)
        else:
            return subprocess.check_output(cmd.split(" "))
    except subprocess.CalledProcessError as e:
        print("CallProcessError occurred. More information for process is below...")
        print("Output - %s" % e.output)
        print("Return code - %d" % e.returncode)
        raise
def continue_maybe(prompt):
    """Ask the operator a yes/no question; any answer other than 'y'
    aborts the whole script via fail()."""
    answer = raw_input("\n%s (y/n): " % prompt).lower()
    if answer == "y":
        return
    fail("Okay, exiting")
def clean_up():
    """Restore the original checkout and delete temporary PR_TOOL branches.

    Reads the module-level `original_head` that main() recorded before any
    branch switching happened.
    """
    print("Restoring head pointer to %s" % original_head)
    run_cmd("git checkout %s" % original_head)
    # `git branch` output is stripped of spaces (and the '*' marker line is
    # left intact) before filtering for our temporary-branch prefix.
    branches = run_cmd("git branch").replace(" ", "").split("\n")
    for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
        print("Deleting local branch %s" % branch)
        run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n")
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
print("Information of commits: %s" % (commits,))
if len(distinct_authors) > 1:
fail("We don't allow squashing commits which have multiple authors. You need to handle the merge manually. authors: %s" % (distinct_authors,))
primary_author = distinct_authors[0] | try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of Registry.
merge_message_flags += ["-m", body.replace("@", "")]
merge_message_flags += ["-m", "Author: %s" % primary_author]
if len(reviewers) > 0:
merge_message_flags += ["-m", "Reviewers: %s" % ",".join(reviewers)]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
if len(commits) > 1:
result = raw_input("List pull request commits in squashed commit message? (y/n) [n]: ")
if result.lower() == "y":
should_list_commits = True
else:
should_list_commits = False
else:
should_list_commits = False
# The string "Closes #%s" string is required for GitHub to correctly close the PR
close_line = "Closes #%s from %s" % (pr_num, pr_repo_desc)
if should_list_commits:
close_line += " and squashes the following commits:"
merge_message_flags += ["-m", close_line]
if should_list_commits:
merge_message_flags += ["-m", "\n".join(commits)]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
result = raw_input("Merge complete (local ref %s). Push to %s? (y/n)" %
(target_branch_name, PUSH_REMOTE_NAME))
if result.lower() != "y":
result = raw_input("Exiting. Do you want to keep the current state? (expert only) (y/n)")
if result.lower() != "y":
fail("Okay, exiting")
else:
print("Okay, exiting without cleaning up.")
sys.exit(0)
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def get_current_ref():
ref = run_cmd("git rev-parse --abbrev-ref HEAD").strip()
if ref == 'HEAD':
# The current ref is a detached HEAD, so grab its SHA.
return run_cmd("git rev-parse HEAD").strip()
else:
return ref
def get_remotes():
remotes_output = run_cmd("git remote -v").strip().split("\n")
return set(map(lambda x: x.split("\t")[0], remotes_output))
def main():
global original_head
os.chdir(REGISTRY_HOME)
original_head = get_current_ref()
remotes = get_remotes()
if not PR_REMOTE_NAME in remotes:
fail("Remote for pull request [%s] not registered" % PR_REMOTE_NAME)
if not PUSH_REMOTE_NAME in remotes:
fail("Remote for push [%s] not registered" % PUSH_REMOTE_NAME)
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
print("The title of PR: %s" % pr["title"])
if not pr["title"].startswith("ISSUE-"):
print("The title of PR doesn't conform to the Registry rule: doesn't start with 'ISSUE-'")
continue_maybe("Continue merging?")
result = raw_input("Do you want to modify the title before continue? (y/n): ")
if result.lower() == "y":
title_str = raw_input("New title: ")
title = title_str.strip()
else:
print("OK. Will use PR's title.")
title = pr["title"]
else:
title = pr["title"]
body = pr["body"]
if not body or len(body.strip()) <= 0:
print("WARN: The body of PR doesn't have any content which should have information of PR.")
print("If you continue merging, commit title is available but commit message may be empty.")
continue_maybe("Do you want to continue?")
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["state"] != "open":
fail("The state of PR is not 'open'. We don't want to deal with closed PR.")
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" %
(title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
review_comments = get_json("%s/pulls/%s/reviews" % (GITHUB_API_BASE, pr_num))
reviewers = set(map(lambda x: "@" + x["user"]["login"], review_comments))
merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc)
if __name__ == "__main__":
try:
main()
except SystemExit:
# don't clean_up() while receiving SystemExit: the situation might be normal exit
pass
except:
clean_up()
raise |
had_conflicts = False | random_line_split |
merge_registry_pr.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Registry.
# usage: ./merge_registry_pr.py (see config env vars below)
#
# This script is inspired by Spark merge script and also borrow some codes from Kafka.
#
# This utility assumes you already have a local Registry git folder and that you
# have added remotes corresponding to both (i) the pull repository (which pull requests are available)
# and (ii) the push repository.
import json
import os
import subprocess
import sys
import urllib2
# Location of your Registry git development area
REGISTRY_HOME = os.environ.get("REGISTRY_HOME", os.getcwd())
# Remote name which points to pull repository
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "pull-repo")
# Remote name which points to push repository
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "push-repo")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/hortonworks/registry/pull"
GITHUB_API_BASE = "https://api.github.com/repos/hortonworks/registry"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
message = "Exceeded the GitHub API rate limit; see the instructions in " +\
"dev/merge_registry_pr.py to configure an OAuth token for making authenticated " +\
"GitHub requests."
else:
message = "Unable to fetch URL, exiting: %s" % url
fail(message)
def fail(msg):
print(msg)
clean_up()
sys.exit(-1)
def run_cmd(cmd):
print(cmd)
try:
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
except subprocess.CalledProcessError as e:
print("CallProcessError occurred. More information for process is below...")
print("Output - %s" % e.output)
print("Return code - %d" % e.returncode)
raise
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n")
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
print("Information of commits: %s" % (commits,))
if len(distinct_authors) > 1:
fail("We don't allow squashing commits which have multiple authors. You need to handle the merge manually. authors: %s" % (distinct_authors,))
primary_author = distinct_authors[0]
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of Registry.
merge_message_flags += ["-m", body.replace("@", "")]
merge_message_flags += ["-m", "Author: %s" % primary_author]
if len(reviewers) > 0:
merge_message_flags += ["-m", "Reviewers: %s" % ",".join(reviewers)]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
if len(commits) > 1:
result = raw_input("List pull request commits in squashed commit message? (y/n) [n]: ")
if result.lower() == "y":
should_list_commits = True
else:
should_list_commits = False
else:
should_list_commits = False
# The string "Closes #%s" string is required for GitHub to correctly close the PR
close_line = "Closes #%s from %s" % (pr_num, pr_repo_desc)
if should_list_commits:
|
merge_message_flags += ["-m", close_line]
if should_list_commits:
merge_message_flags += ["-m", "\n".join(commits)]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
result = raw_input("Merge complete (local ref %s). Push to %s? (y/n)" %
(target_branch_name, PUSH_REMOTE_NAME))
if result.lower() != "y":
result = raw_input("Exiting. Do you want to keep the current state? (expert only) (y/n)")
if result.lower() != "y":
fail("Okay, exiting")
else:
print("Okay, exiting without cleaning up.")
sys.exit(0)
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def get_current_ref():
ref = run_cmd("git rev-parse --abbrev-ref HEAD").strip()
if ref == 'HEAD':
# The current ref is a detached HEAD, so grab its SHA.
return run_cmd("git rev-parse HEAD").strip()
else:
return ref
def get_remotes():
remotes_output = run_cmd("git remote -v").strip().split("\n")
return set(map(lambda x: x.split("\t")[0], remotes_output))
def main():
global original_head
os.chdir(REGISTRY_HOME)
original_head = get_current_ref()
remotes = get_remotes()
if not PR_REMOTE_NAME in remotes:
fail("Remote for pull request [%s] not registered" % PR_REMOTE_NAME)
if not PUSH_REMOTE_NAME in remotes:
fail("Remote for push [%s] not registered" % PUSH_REMOTE_NAME)
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
print("The title of PR: %s" % pr["title"])
if not pr["title"].startswith("ISSUE-"):
print("The title of PR doesn't conform to the Registry rule: doesn't start with 'ISSUE-'")
continue_maybe("Continue merging?")
result = raw_input("Do you want to modify the title before continue? (y/n): ")
if result.lower() == "y":
title_str = raw_input("New title: ")
title = title_str.strip()
else:
print("OK. Will use PR's title.")
title = pr["title"]
else:
title = pr["title"]
body = pr["body"]
if not body or len(body.strip()) <= 0:
print("WARN: The body of PR doesn't have any content which should have information of PR.")
print("If you continue merging, commit title is available but commit message may be empty.")
continue_maybe("Do you want to continue?")
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["state"] != "open":
fail("The state of PR is not 'open'. We don't want to deal with closed PR.")
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" %
(title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
review_comments = get_json("%s/pulls/%s/reviews" % (GITHUB_API_BASE, pr_num))
reviewers = set(map(lambda x: "@" + x["user"]["login"], review_comments))
merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc)
if __name__ == "__main__":
try:
main()
except SystemExit:
# don't clean_up() while receiving SystemExit: the situation might be normal exit
pass
except:
clean_up()
raise
| close_line += " and squashes the following commits:" | conditional_block |
merge_registry_pr.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Registry.
# usage: ./merge_registry_pr.py (see config env vars below)
#
# This script is inspired by Spark merge script and also borrow some codes from Kafka.
#
# This utility assumes you already have a local Registry git folder and that you
# have added remotes corresponding to both (i) the pull repository (which pull requests are available)
# and (ii) the push repository.
import json
import os
import subprocess
import sys
import urllib2
# Location of your Registry git development area
REGISTRY_HOME = os.environ.get("REGISTRY_HOME", os.getcwd())
# Remote name which points to pull repository
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "pull-repo")
# Remote name which points to push repository
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "push-repo")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/hortonworks/registry/pull"
GITHUB_API_BASE = "https://api.github.com/repos/hortonworks/registry"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
message = "Exceeded the GitHub API rate limit; see the instructions in " +\
"dev/merge_registry_pr.py to configure an OAuth token for making authenticated " +\
"GitHub requests."
else:
message = "Unable to fetch URL, exiting: %s" % url
fail(message)
def fail(msg):
|
def run_cmd(cmd):
print(cmd)
try:
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
except subprocess.CalledProcessError as e:
print("CallProcessError occurred. More information for process is below...")
print("Output - %s" % e.output)
print("Return code - %d" % e.returncode)
raise
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc):
    """Squash-merge pull request `pr_num` into `target_ref` and push it.

    Fetches the PR head and the target branch into temporary PR_TOOL
    branches, squash-merges (with interactive conflict fix-up if needed),
    builds the commit message from title/body/author/reviewers, then pushes
    after operator confirmation. Returns the abbreviated merge hash.
    """
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                       '--pretty=format:%h [%an] %s']).split("\n")
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                              '--pretty=format:%an <%ae>']).split("\n")
    # Most frequent commit author first; only single-author PRs are allowed.
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    print("Information of commits: %s" % (commits,))
    if len(distinct_authors) > 1:
        fail("We don't allow squashing commits which have multiple authors. You need to handle the merge manually. authors: %s" % (distinct_authors,))
    primary_author = distinct_authors[0]
    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Let the operator resolve conflicts by hand, then continue.
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True
    # Each "-m" flag becomes one paragraph of the squashed commit message.
    merge_message_flags = []
    merge_message_flags += ["-m", title]
    if body is not None:
        # We remove @ symbols from the body to avoid triggering e-mails
        # to people every time someone creates a public fork of Registry.
        merge_message_flags += ["-m", body.replace("@", "")]
    merge_message_flags += ["-m", "Author: %s" % primary_author]
    if len(reviewers) > 0:
        merge_message_flags += ["-m", "Reviewers: %s" % ",".join(reviewers)]
    if had_conflicts:
        # Record who resolved the conflicts, for traceability.
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]
    if len(commits) > 1:
        result = raw_input("List pull request commits in squashed commit message? (y/n) [n]: ")
        if result.lower() == "y":
            should_list_commits = True
        else:
            should_list_commits = False
    else:
        should_list_commits = False
    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    close_line = "Closes #%s from %s" % (pr_num, pr_repo_desc)
    if should_list_commits:
        close_line += " and squashes the following commits:"
    merge_message_flags += ["-m", close_line]
    if should_list_commits:
        merge_message_flags += ["-m", "\n".join(commits)]
    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
    result = raw_input("Merge complete (local ref %s). Push to %s? (y/n)" %
                       (target_branch_name, PUSH_REMOTE_NAME))
    if result.lower() != "y":
        # Expert escape hatch: exit without deleting the temp branches.
        result = raw_input("Exiting. Do you want to keep the current state? (expert only) (y/n)")
        if result.lower() != "y":
            fail("Okay, exiting")
        else:
            print("Okay, exiting without cleaning up.")
            sys.exit(0)
    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)
    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def get_current_ref():
ref = run_cmd("git rev-parse --abbrev-ref HEAD").strip()
if ref == 'HEAD':
# The current ref is a detached HEAD, so grab its SHA.
return run_cmd("git rev-parse HEAD").strip()
else:
return ref
def get_remotes():
remotes_output = run_cmd("git remote -v").strip().split("\n")
return set(map(lambda x: x.split("\t")[0], remotes_output))
def main():
global original_head
os.chdir(REGISTRY_HOME)
original_head = get_current_ref()
remotes = get_remotes()
if not PR_REMOTE_NAME in remotes:
fail("Remote for pull request [%s] not registered" % PR_REMOTE_NAME)
if not PUSH_REMOTE_NAME in remotes:
fail("Remote for push [%s] not registered" % PUSH_REMOTE_NAME)
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
print("The title of PR: %s" % pr["title"])
if not pr["title"].startswith("ISSUE-"):
print("The title of PR doesn't conform to the Registry rule: doesn't start with 'ISSUE-'")
continue_maybe("Continue merging?")
result = raw_input("Do you want to modify the title before continue? (y/n): ")
if result.lower() == "y":
title_str = raw_input("New title: ")
title = title_str.strip()
else:
print("OK. Will use PR's title.")
title = pr["title"]
else:
title = pr["title"]
body = pr["body"]
if not body or len(body.strip()) <= 0:
print("WARN: The body of PR doesn't have any content which should have information of PR.")
print("If you continue merging, commit title is available but commit message may be empty.")
continue_maybe("Do you want to continue?")
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["state"] != "open":
fail("The state of PR is not 'open'. We don't want to deal with closed PR.")
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" %
(title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
review_comments = get_json("%s/pulls/%s/reviews" % (GITHUB_API_BASE, pr_num))
reviewers = set(map(lambda x: "@" + x["user"]["login"], review_comments))
merge_pr(pr_num, target_ref, title, body, reviewers, pr_repo_desc)
if __name__ == "__main__":
try:
main()
except SystemExit:
# don't clean_up() while receiving SystemExit: the situation might be normal exit
pass
except:
clean_up()
raise
| print(msg)
clean_up()
sys.exit(-1) | identifier_body |
spike_generators.rs | //! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
///
/// `V` is the voltage type produced by the generator.
pub trait SpikeGenerator<V> {
    /// Get whether the neuron/generator has spiked at the update.
    fn did_spike(&self) -> bool;
    /// Gets the voltage of the neuron/generator at the current time.
    fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
    /// Feed an input voltage into the neuron and advance it by the
    /// time-step `dt`.
    fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
/// Makes a new input neuron that shall spike at the given times,
/// spiking at the given rate.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
fn get_voltage(&self) -> V |
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
/// * `rate_fn`: Returns the rate at which the neuron should spike at at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn rate_fn_of_times<'a>(
slot_starts_to_rate: &'a mut Vec<(T, i32)>,
) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
// We move the "spiked" counter first since the more usual usage
// pattern would need to read whether the neuron spiked after
// advancing and doing this state change after the ones below
// would actually mean that checking "did_spike" in a loop would
// actually miss every spike since this check would incorrectly
// increment self.num_spiked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
/// Wraps a discrete neuron into one that exponentially decays after
/// spiking. The decay function outputted is V * a * e ^ (b * T) where V
/// is the previous spike voltage, T is the time since the previous spike,
/// * `spike_decay_scalar` is the scalar "a",
/// * and `spike_timing_scalar` is the scalar "b" (in the exponent)
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
} else if !self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
}
| {
if self.did_spike() {
self.spike_voltage.into()
} else {
(0.0 * si::V).into()
}
} | identifier_body |
spike_generators.rs | //! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
pub trait SpikeGenerator<V> {
/// Get whether the neuron/generator has spiked at the update.
fn did_spike(&self) -> bool;
/// Gets the voltage of the neuron/generator at the current time.
fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
/// Makes a new input neuron that shall spike at the given times,
/// spiking at the given rate.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage.into()
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
/// * `rate_fn`: Returns the rate at which the neuron should spike at at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn | <'a>(
slot_starts_to_rate: &'a mut Vec<(T, i32)>,
) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
// We move the "spiked" counter first since the more usual usage
// pattern would need to read whether the neuron spiked after
// advancing and doing this state change after the ones below
// would actually mean that checking "did_spike" in a loop would
// actually miss every spike since this check would incorrectly
// increment self.num_spiked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
/// Wraps a discrete neuron into one that exponentially decays after
/// spiking. The decay function outputted is V * a * e ^ (b * T) where V
/// is the previous spike voltage, T is the time since the previous spike,
/// * `spike_decay_scalar` is the scalar "a",
/// * and `spike_timing_scalar` is the scalar "b" (in the exponent)
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
} else if !self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
}
| rate_fn_of_times | identifier_name |
spike_generators.rs | //! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
pub trait SpikeGenerator<V> {
/// Get whether the neuron/generator has spiked at the update.
fn did_spike(&self) -> bool;
/// Gets the voltage of the neuron/generator at the current time.
fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
/// Makes a new input neuron that shall spike at the given times,
/// spiking at the given rate.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage.into()
} else |
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
/// * `rate_fn`: Returns the rate at which the neuron should spike at at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn rate_fn_of_times<'a>(
slot_starts_to_rate: &'a mut Vec<(T, i32)>,
) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
// We move the "spiked" counter first since the more usual usage
// pattern would need to read whether the neuron spiked after
// advancing and doing this state change after the ones below
// would actually mean that checking "did_spike" in a loop would
// actually miss every spike since this check would incorrectly
// increment self.num_spiked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
/// Wraps a discrete neuron into one that exponentially decays after
/// spiking. The decay function outputted is V * a * e ^ (b * T) where V
/// is the previous spike voltage, T is the time since the previous spike,
/// * `spike_decay_scalar` is the scalar "a",
/// * and `spike_timing_scalar` is the scalar "b" (in the exponent)
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
} else if !self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
}
| {
(0.0 * si::V).into()
} | conditional_block |
spike_generators.rs | //! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
pub trait SpikeGenerator<V> {
/// Get whether the neuron/generator has spiked at the update.
fn did_spike(&self) -> bool;
/// Gets the voltage of the neuron/generator at the current time.
fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
/// Makes a new input neuron that shall spike at the given times,
/// spiking at the given rate.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage.into()
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
/// * `rate_fn`: Returns the rate at which the neuron should spike at at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn rate_fn_of_times<'a>( | ) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
// We move the "spiked" counter first since the more usual usage
// pattern would need to read whether the neuron spiked after
// advancing and doing this state change after the ones below
// would actually mean that checking "did_spike" in a loop would
// actually miss every spike since this check would incorrectly
// increment self.num_spiked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
/// Wraps a discrete neuron into one that exponentially decays after
/// spiking. The decay function outputted is V * a * e ^ (b * T) where V
/// is the previous spike voltage, T is the time since the previous spike,
/// * `spike_decay_scalar` is the scalar "a",
/// * and `spike_timing_scalar` is the scalar "b" (in the exponent)
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
} else if !self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
} | slot_starts_to_rate: &'a mut Vec<(T, i32)>, | random_line_split |
utils.ts | import crypto from 'crypto'
import { writeHeapSnapshot } from 'v8'
import { wait } from 'streamr-test-utils'
import { providers, Wallet } from 'ethers'
import { PublishRequest } from 'streamr-client-protocol'
import LeakDetector from 'jest-leak-detector'
import { pTimeout, counterId, CounterId, AggregatedError, pLimitFn } from '../src/utils'
import { Debug, inspect, format } from '../src/utils/log'
import { MaybeAsync } from '../src/types'
import { validateOptions } from '../src/stream/utils'
import type { StreamPartDefinitionOptions, StreamProperties } from '../src/stream'
import { StreamrClient } from '../src/StreamrClient'
import config from './integration/config'
const testDebugRoot = Debug('test')
const testDebug = testDebugRoot.extend.bind(testDebugRoot)
export {
testDebug as Debug
}
export const uid = (prefix?: string) => counterId(`p${process.pid}${prefix ? '-' + prefix : ''}`)
export function fakePrivateKey() {
return crypto.randomBytes(32).toString('hex')
}
export function fakeAddress() {
return crypto.randomBytes(32).toString('hex').slice(0, 40)
}
const TEST_REPEATS = (process.env.TEST_REPEATS) ? parseInt(process.env.TEST_REPEATS, 10) : 1
export function describeRepeats(msg: any, fn: any, describeFn = describe) {
for (let k = 0; k < TEST_REPEATS; k++) {
// eslint-disable-next-line no-loop-func
describe(msg, () => {
describeFn(`test repeat ${k + 1} of ${TEST_REPEATS}`, fn)
})
}
}
describeRepeats.skip = (msg: any, fn: any) => {
describe.skip(`${msg} – test repeat ALL of ${TEST_REPEATS}`, fn)
}
describeRepeats.only = (msg: any, fn: any) => {
describeRepeats(msg, fn, describe.only)
}
export async function collect(iterator: any, fn: MaybeAsync<(item: any) => void> = async () => {}) {
const received: any[] = []
for await (const msg of iterator) {
received.push(msg.getParsedContent())
await fn({
msg, iterator, received,
})
}
return received
}
export function getTestSetTimeout(): (...args: Parameters<typeof setTimeout>) => ReturnType<typeof setTimeout> {
| export function addAfterFn() {
const afterFns: any[] = []
afterEach(async () => {
const fns = afterFns.slice()
afterFns.length = 0
// @ts-expect-error
AggregatedError.throwAllSettled(await Promise.allSettled(fns.map((fn) => fn())))
})
return (fn: any) => {
afterFns.push(fn)
}
}
export const Msg = (opts?: any) => ({
value: uid('msg'),
...opts,
})
function defaultMessageMatchFn(msgTarget: any, msgGot: any) {
if (msgTarget.streamMessage.signature) {
// compare signatures by default
return msgTarget.streamMessage.signature === msgGot.signature
}
return JSON.stringify(msgGot.content) === JSON.stringify(msgTarget.streamMessage.getParsedContent())
}
export function getWaitForStorage(client: StreamrClient, defaultOpts = {}) {
/* eslint-disable no-await-in-loop */
return async (publishRequest: any, opts = {}) => {
const {
streamId,
streamPartition = 0,
interval = 500,
timeout = 10000,
count = 100,
messageMatchFn = defaultMessageMatchFn
} = validateOptions({
...defaultOpts,
...opts,
})
if (!publishRequest && !publishRequest.streamMessage) {
throw new Error(`should check against publish request for comparison, got: ${inspect(publishRequest)}`)
}
const start = Date.now()
let last: any
// eslint-disable-next-line no-constant-condition
let found = false
while (!found) {
const duration = Date.now() - start
if (duration > timeout) {
client.debug('waitForStorage timeout %o', {
timeout,
duration
}, {
publishRequest,
last: last!.map((l: any) => l.content),
})
const err: any = new Error(`timed out after ${duration}ms waiting for message: ${inspect(publishRequest)}`)
err.publishRequest = publishRequest
throw err
}
last = await client.getStreamLast({
// @ts-expect-error
streamId,
streamPartition,
count,
})
for (const lastMsg of last) {
if (messageMatchFn(publishRequest, lastMsg)) {
found = true
return
}
}
client.debug('message not found, retrying... %o', {
msg: publishRequest.streamMessage.getParsedContent(),
last: last.map(({ content }: any) => content)
})
await wait(interval)
}
}
/* eslint-enable no-await-in-loop */
}
export type CreateMessageOpts = {
/** index of message in total */
index: number,
/** batch number */
batch: number,
/** index of message in batch */
batchIndex: number,
/** total messages */
total: number
}
export type PublishOpts = {
testName: string,
delay: number
timeout: number
/** set false to allow gc message content */
retainMessages: boolean,
waitForLast: boolean
waitForLastCount: number
waitForLastTimeout: number
beforeEach: (m: any) => any
afterEach: (msg: any, request: PublishRequest) => Promise<void> | void
timestamp: number | (() => number)
partitionKey: string
createMessage: (opts: CreateMessageOpts) => Promise<any> | any
batchSize: number
}
type PublishTestMessagesOpts = StreamPartDefinitionOptions & Partial<PublishOpts>
export function getPublishTestMessages(client: StreamrClient, defaultOptsOrStreamId: string | PublishTestMessagesOpts = {}) {
// second argument could also be streamId
let defaultOpts: PublishTestMessagesOpts
if (typeof defaultOptsOrStreamId === 'string') {
// eslint-disable-next-line no-param-reassign
defaultOpts = {
streamId: defaultOptsOrStreamId as string,
}
} else {
defaultOpts = defaultOptsOrStreamId as PublishTestMessagesOpts
}
const publishTestMessagesRaw = async (n = 4, opts: PublishTestMessagesOpts = {}) => {
const id = 'testName' in opts ? opts.testName : uid('test')
let msgCount = 0
const {
streamId,
streamPartition = 0,
retainMessages = true,
delay = 100,
timeout = 3500,
waitForLast = false, // wait for message to hit storage
waitForLastCount,
waitForLastTimeout,
beforeEach = (m: any) => m,
afterEach = () => {},
timestamp,
partitionKey,
batchSize = 1,
createMessage = () => {
msgCount += 1
return {
test: id,
value: `${msgCount} of ${n}`
}
},
} = validateOptions<PublishTestMessagesOpts>({
...defaultOpts,
...opts,
})
let connectionDone = false
function checkDone() {
if (connectionDone) {
throw new Error('Connection done before finished publishing')
}
}
const onDone = () => {
connectionDone = true
}
try {
client.connection.once('done', onDone)
// async queue to ensure messages set up in order
const setupMessage = pLimitFn(async (publishOpts) => {
const message = createMessage(publishOpts)
await beforeEach(message)
return message
})
const publishMessage = async (publishOpts: CreateMessageOpts) => {
if (connectionDone) { return }
const message = await setupMessage(publishOpts)
if (connectionDone) { return }
const { index } = publishOpts
const request = await pTimeout(client.publish(
{ streamId, streamPartition },
message,
typeof timestamp === 'function' ? timestamp() : timestamp,
partitionKey
), timeout, `publish timeout ${streamId}: ${index} ${inspect(message, {
maxStringLength: 256,
})}`).catch((err) => {
if (connectionDone && err.message.includes('Needs connection')) {
// ignore connection closed error
return
}
throw err
})
if (!retainMessages) {
// only keep last message (for waitForLast)
published.length = 0
}
published.push([
message,
// @ts-expect-error
request,
])
if (connectionDone) { return }
await afterEach(message, request as PublishRequest)
checkDone()
await wait(delay) // ensure timestamp increments for reliable resend response in test.
checkDone()
}
const published: [ message: any, request: PublishRequest ][] = []
/* eslint-disable no-await-in-loop, no-loop-func */
const batchTasks: Promise<any>[] = []
let batches = 1
for (let i = 0; i < n; i++) {
if (connectionDone) {
await Promise.allSettled(batchTasks)
break
}
if (batchTasks.length < batchSize) {
client.debug('adding to batch', { i, batchTasks: batchTasks.length, batches })
// fill batch
batchTasks.push(publishMessage({
index: i,
batchIndex: batchTasks.length,
batch: batches,
total: n,
}))
}
if (batchTasks.length >= batchSize || i >= n) {
// batch is full, or finished all messages
// wait for tasks
const tasks = batchTasks.slice()
batchTasks.length = 0
batches += 1
client.debug('executing batch', { i, batchTasks: tasks.length, batches })
await Promise.allSettled(tasks)
await Promise.all(tasks)
}
}
/* eslint-enable no-await-in-loop, no-loop-func */
checkDone()
if (waitForLast) {
const msg = published[published.length - 1][1]
await getWaitForStorage(client)(msg, {
streamId,
streamPartition,
timeout: waitForLastTimeout,
count: waitForLastCount,
messageMatchFn(m: any, b: any) {
checkDone()
return m.streamMessage.signature === b.signature
}
})
}
return published
} finally {
client.connection.off('done', onDone)
}
}
const publishTestMessages = async (...args: any[]) => {
const published = await publishTestMessagesRaw(...args)
return published.map(([msg]) => msg)
}
publishTestMessages.raw = publishTestMessagesRaw
return publishTestMessages
}
export const createMockAddress = () => '0x000000000000000000000000000' + Date.now()
export const createClient = (providerSidechain?: providers.JsonRpcProvider) => {
const wallet = new Wallet(`0x100000000000000000000000000000000000000012300000001${Date.now()}`, providerSidechain)
return new StreamrClient({
...config.clientOptions,
auth: {
privateKey: wallet.privateKey
}
})
}
export const expectInvalidAddress = (operation: () => Promise<any>) => {
return expect(() => operation()).rejects.toThrow('invalid address')
}
// eslint-disable-next-line no-undef
const getTestName = (module: NodeModule) => {
const fileNamePattern = new RegExp('.*/(.*).test\\...')
const groups = module.filename.match(fileNamePattern)
return (groups !== null) ? groups[1] : module.filename
}
const randomTestRunId = crypto.randomBytes(4).toString('hex')
// eslint-disable-next-line no-undef
export const createRelativeTestStreamId = (module: NodeModule, suffix?: string) => {
return counterId(`/test/${randomTestRunId}/${getTestName(module)}${(suffix !== undefined) ? '-' + suffix : ''}`, '-')
}
// eslint-disable-next-line no-undef
export const createTestStream = (streamrClient: StreamrClient, module: NodeModule, props?: Partial<StreamProperties>) => {
return streamrClient.createStream({
id: createRelativeTestStreamId(module),
...props
})
}
/**
* Write a heap snapshot file if WRITE_SNAPSHOTS env var is set.
*/
export function snapshot() {
if (!process.env.WRITE_SNAPSHOTS) { return '' }
testDebugRoot('heap snapshot >>')
const value = writeHeapSnapshot()
testDebugRoot('heap snapshot <<', value)
return value
}
const testUtilsCounter = CounterId('test/utils')
export class LeaksDetector {
leakDetectors: Map<string, LeakDetector> = new Map()
private counter = CounterId(testUtilsCounter(this.constructor.name))
add(name: string, obj: any) {
this.leakDetectors.set(this.counter(name), new LeakDetector(obj))
}
async getLeaks(): Promise<string[]> {
const results = await Promise.all([...this.leakDetectors.entries()].map(async ([key, d]) => {
const isLeaking = await d.isLeaking()
return isLeaking ? key : undefined
}))
return results.filter((key) => key != null) as string[]
}
async checkNoLeaks() {
const leaks = await this.getLeaks()
if (leaks.length) {
throw new Error(format('Leaking %d of %d items: %o', leaks.length, this.leakDetectors.size, leaks))
}
}
clear() {
this.leakDetectors.clear()
}
}
| const addAfter = addAfterFn()
return (...args: Parameters<typeof setTimeout>) => {
const t = setTimeout(...args)
addAfter(() => {
clearTimeout(t)
})
return t
}
}
| identifier_body |
utils.ts | import crypto from 'crypto'
import { writeHeapSnapshot } from 'v8'
import { wait } from 'streamr-test-utils'
import { providers, Wallet } from 'ethers'
import { PublishRequest } from 'streamr-client-protocol'
import LeakDetector from 'jest-leak-detector'
import { pTimeout, counterId, CounterId, AggregatedError, pLimitFn } from '../src/utils'
import { Debug, inspect, format } from '../src/utils/log'
import { MaybeAsync } from '../src/types'
import { validateOptions } from '../src/stream/utils'
import type { StreamPartDefinitionOptions, StreamProperties } from '../src/stream'
import { StreamrClient } from '../src/StreamrClient'
import config from './integration/config'
const testDebugRoot = Debug('test')
const testDebug = testDebugRoot.extend.bind(testDebugRoot)
export {
testDebug as Debug
}
export const uid = (prefix?: string) => counterId(`p${process.pid}${prefix ? '-' + prefix : ''}`)
export function fakePrivateKey() {
return crypto.randomBytes(32).toString('hex')
}
export function fakeAddress() {
return crypto.randomBytes(32).toString('hex').slice(0, 40)
}
const TEST_REPEATS = (process.env.TEST_REPEATS) ? parseInt(process.env.TEST_REPEATS, 10) : 1
export function describeRepeats(msg: any, fn: any, describeFn = describe) {
for (let k = 0; k < TEST_REPEATS; k++) {
// eslint-disable-next-line no-loop-func
describe(msg, () => {
describeFn(`test repeat ${k + 1} of ${TEST_REPEATS}`, fn)
})
}
}
describeRepeats.skip = (msg: any, fn: any) => {
describe.skip(`${msg} – test repeat ALL of ${TEST_REPEATS}`, fn)
}
describeRepeats.only = (msg: any, fn: any) => {
describeRepeats(msg, fn, describe.only)
}
export async function collect(iterator: any, fn: MaybeAsync<(item: any) => void> = async () => {}) {
const received: any[] = []
for await (const msg of iterator) {
received.push(msg.getParsedContent())
await fn({
msg, iterator, received,
})
}
return received
}
export function getTestSetTimeout(): (...args: Parameters<typeof setTimeout>) => ReturnType<typeof setTimeout> {
const addAfter = addAfterFn()
return (...args: Parameters<typeof setTimeout>) => {
const t = setTimeout(...args)
addAfter(() => {
clearTimeout(t)
})
return t
}
}
export function addAfterFn() {
const afterFns: any[] = []
afterEach(async () => {
const fns = afterFns.slice()
afterFns.length = 0
// @ts-expect-error
AggregatedError.throwAllSettled(await Promise.allSettled(fns.map((fn) => fn())))
})
return (fn: any) => {
afterFns.push(fn)
}
}
export const Msg = (opts?: any) => ({
value: uid('msg'),
...opts,
})
function defaultMessageMatchFn(msgTarget: any, msgGot: any) {
if (msgTarget.streamMessage.signature) {
// compare signatures by default
return msgTarget.streamMessage.signature === msgGot.signature
} | /* eslint-disable no-await-in-loop */
return async (publishRequest: any, opts = {}) => {
const {
streamId,
streamPartition = 0,
interval = 500,
timeout = 10000,
count = 100,
messageMatchFn = defaultMessageMatchFn
} = validateOptions({
...defaultOpts,
...opts,
})
if (!publishRequest && !publishRequest.streamMessage) {
throw new Error(`should check against publish request for comparison, got: ${inspect(publishRequest)}`)
}
const start = Date.now()
let last: any
// eslint-disable-next-line no-constant-condition
let found = false
while (!found) {
const duration = Date.now() - start
if (duration > timeout) {
client.debug('waitForStorage timeout %o', {
timeout,
duration
}, {
publishRequest,
last: last!.map((l: any) => l.content),
})
const err: any = new Error(`timed out after ${duration}ms waiting for message: ${inspect(publishRequest)}`)
err.publishRequest = publishRequest
throw err
}
last = await client.getStreamLast({
// @ts-expect-error
streamId,
streamPartition,
count,
})
for (const lastMsg of last) {
if (messageMatchFn(publishRequest, lastMsg)) {
found = true
return
}
}
client.debug('message not found, retrying... %o', {
msg: publishRequest.streamMessage.getParsedContent(),
last: last.map(({ content }: any) => content)
})
await wait(interval)
}
}
/* eslint-enable no-await-in-loop */
}
export type CreateMessageOpts = {
/** index of message in total */
index: number,
/** batch number */
batch: number,
/** index of message in batch */
batchIndex: number,
/** total messages */
total: number
}
export type PublishOpts = {
testName: string,
delay: number
timeout: number
/** set false to allow gc message content */
retainMessages: boolean,
waitForLast: boolean
waitForLastCount: number
waitForLastTimeout: number
beforeEach: (m: any) => any
afterEach: (msg: any, request: PublishRequest) => Promise<void> | void
timestamp: number | (() => number)
partitionKey: string
createMessage: (opts: CreateMessageOpts) => Promise<any> | any
batchSize: number
}
type PublishTestMessagesOpts = StreamPartDefinitionOptions & Partial<PublishOpts>
export function getPublishTestMessages(client: StreamrClient, defaultOptsOrStreamId: string | PublishTestMessagesOpts = {}) {
// second argument could also be streamId
let defaultOpts: PublishTestMessagesOpts
if (typeof defaultOptsOrStreamId === 'string') {
// eslint-disable-next-line no-param-reassign
defaultOpts = {
streamId: defaultOptsOrStreamId as string,
}
} else {
defaultOpts = defaultOptsOrStreamId as PublishTestMessagesOpts
}
const publishTestMessagesRaw = async (n = 4, opts: PublishTestMessagesOpts = {}) => {
const id = 'testName' in opts ? opts.testName : uid('test')
let msgCount = 0
const {
streamId,
streamPartition = 0,
retainMessages = true,
delay = 100,
timeout = 3500,
waitForLast = false, // wait for message to hit storage
waitForLastCount,
waitForLastTimeout,
beforeEach = (m: any) => m,
afterEach = () => {},
timestamp,
partitionKey,
batchSize = 1,
createMessage = () => {
msgCount += 1
return {
test: id,
value: `${msgCount} of ${n}`
}
},
} = validateOptions<PublishTestMessagesOpts>({
...defaultOpts,
...opts,
})
let connectionDone = false
function checkDone() {
if (connectionDone) {
throw new Error('Connection done before finished publishing')
}
}
const onDone = () => {
connectionDone = true
}
try {
client.connection.once('done', onDone)
// async queue to ensure messages set up in order
const setupMessage = pLimitFn(async (publishOpts) => {
const message = createMessage(publishOpts)
await beforeEach(message)
return message
})
const publishMessage = async (publishOpts: CreateMessageOpts) => {
if (connectionDone) { return }
const message = await setupMessage(publishOpts)
if (connectionDone) { return }
const { index } = publishOpts
const request = await pTimeout(client.publish(
{ streamId, streamPartition },
message,
typeof timestamp === 'function' ? timestamp() : timestamp,
partitionKey
), timeout, `publish timeout ${streamId}: ${index} ${inspect(message, {
maxStringLength: 256,
})}`).catch((err) => {
if (connectionDone && err.message.includes('Needs connection')) {
// ignore connection closed error
return
}
throw err
})
if (!retainMessages) {
// only keep last message (for waitForLast)
published.length = 0
}
published.push([
message,
// @ts-expect-error
request,
])
if (connectionDone) { return }
await afterEach(message, request as PublishRequest)
checkDone()
await wait(delay) // ensure timestamp increments for reliable resend response in test.
checkDone()
}
const published: [ message: any, request: PublishRequest ][] = []
/* eslint-disable no-await-in-loop, no-loop-func */
const batchTasks: Promise<any>[] = []
let batches = 1
for (let i = 0; i < n; i++) {
if (connectionDone) {
await Promise.allSettled(batchTasks)
break
}
if (batchTasks.length < batchSize) {
client.debug('adding to batch', { i, batchTasks: batchTasks.length, batches })
// fill batch
batchTasks.push(publishMessage({
index: i,
batchIndex: batchTasks.length,
batch: batches,
total: n,
}))
}
if (batchTasks.length >= batchSize || i >= n) {
// batch is full, or finished all messages
// wait for tasks
const tasks = batchTasks.slice()
batchTasks.length = 0
batches += 1
client.debug('executing batch', { i, batchTasks: tasks.length, batches })
await Promise.allSettled(tasks)
await Promise.all(tasks)
}
}
/* eslint-enable no-await-in-loop, no-loop-func */
checkDone()
if (waitForLast) {
const msg = published[published.length - 1][1]
await getWaitForStorage(client)(msg, {
streamId,
streamPartition,
timeout: waitForLastTimeout,
count: waitForLastCount,
messageMatchFn(m: any, b: any) {
checkDone()
return m.streamMessage.signature === b.signature
}
})
}
return published
} finally {
client.connection.off('done', onDone)
}
}
const publishTestMessages = async (...args: any[]) => {
const published = await publishTestMessagesRaw(...args)
return published.map(([msg]) => msg)
}
publishTestMessages.raw = publishTestMessagesRaw
return publishTestMessages
}
export const createMockAddress = () => '0x000000000000000000000000000' + Date.now()
export const createClient = (providerSidechain?: providers.JsonRpcProvider) => {
const wallet = new Wallet(`0x100000000000000000000000000000000000000012300000001${Date.now()}`, providerSidechain)
return new StreamrClient({
...config.clientOptions,
auth: {
privateKey: wallet.privateKey
}
})
}
export const expectInvalidAddress = (operation: () => Promise<any>) => {
return expect(() => operation()).rejects.toThrow('invalid address')
}
// eslint-disable-next-line no-undef
const getTestName = (module: NodeModule) => {
const fileNamePattern = new RegExp('.*/(.*).test\\...')
const groups = module.filename.match(fileNamePattern)
return (groups !== null) ? groups[1] : module.filename
}
const randomTestRunId = crypto.randomBytes(4).toString('hex')
// eslint-disable-next-line no-undef
export const createRelativeTestStreamId = (module: NodeModule, suffix?: string) => {
return counterId(`/test/${randomTestRunId}/${getTestName(module)}${(suffix !== undefined) ? '-' + suffix : ''}`, '-')
}
// eslint-disable-next-line no-undef
export const createTestStream = (streamrClient: StreamrClient, module: NodeModule, props?: Partial<StreamProperties>) => {
return streamrClient.createStream({
id: createRelativeTestStreamId(module),
...props
})
}
/**
* Write a heap snapshot file if WRITE_SNAPSHOTS env var is set.
*/
export function snapshot() {
if (!process.env.WRITE_SNAPSHOTS) { return '' }
testDebugRoot('heap snapshot >>')
const value = writeHeapSnapshot()
testDebugRoot('heap snapshot <<', value)
return value
}
const testUtilsCounter = CounterId('test/utils')
export class LeaksDetector {
leakDetectors: Map<string, LeakDetector> = new Map()
private counter = CounterId(testUtilsCounter(this.constructor.name))
add(name: string, obj: any) {
this.leakDetectors.set(this.counter(name), new LeakDetector(obj))
}
async getLeaks(): Promise<string[]> {
const results = await Promise.all([...this.leakDetectors.entries()].map(async ([key, d]) => {
const isLeaking = await d.isLeaking()
return isLeaking ? key : undefined
}))
return results.filter((key) => key != null) as string[]
}
async checkNoLeaks() {
const leaks = await this.getLeaks()
if (leaks.length) {
throw new Error(format('Leaking %d of %d items: %o', leaks.length, this.leakDetectors.size, leaks))
}
}
clear() {
this.leakDetectors.clear()
}
} | return JSON.stringify(msgGot.content) === JSON.stringify(msgTarget.streamMessage.getParsedContent())
}
export function getWaitForStorage(client: StreamrClient, defaultOpts = {}) { | random_line_split |
utils.ts | import crypto from 'crypto'
import { writeHeapSnapshot } from 'v8'
import { wait } from 'streamr-test-utils'
import { providers, Wallet } from 'ethers'
import { PublishRequest } from 'streamr-client-protocol'
import LeakDetector from 'jest-leak-detector'
import { pTimeout, counterId, CounterId, AggregatedError, pLimitFn } from '../src/utils'
import { Debug, inspect, format } from '../src/utils/log'
import { MaybeAsync } from '../src/types'
import { validateOptions } from '../src/stream/utils'
import type { StreamPartDefinitionOptions, StreamProperties } from '../src/stream'
import { StreamrClient } from '../src/StreamrClient'
import config from './integration/config'
const testDebugRoot = Debug('test')
const testDebug = testDebugRoot.extend.bind(testDebugRoot)
export {
testDebug as Debug
}
export const uid = (prefix?: string) => counterId(`p${process.pid}${prefix ? '-' + prefix : ''}`)
export function fakePrivateKey() {
return crypto.randomBytes(32).toString('hex')
}
export function fakeAddress() {
return crypto.randomBytes(32).toString('hex').slice(0, 40)
}
const TEST_REPEATS = (process.env.TEST_REPEATS) ? parseInt(process.env.TEST_REPEATS, 10) : 1
export function describeRepeats(msg: any, fn: any, describeFn = describe) {
for (let k = 0; k < TEST_REPEATS; k++) {
// eslint-disable-next-line no-loop-func
describe(msg, () => {
describeFn(`test repeat ${k + 1} of ${TEST_REPEATS}`, fn)
})
}
}
describeRepeats.skip = (msg: any, fn: any) => {
describe.skip(`${msg} – test repeat ALL of ${TEST_REPEATS}`, fn)
}
describeRepeats.only = (msg: any, fn: any) => {
describeRepeats(msg, fn, describe.only)
}
export async function collect(iterator: any, fn: MaybeAsync<(item: any) => void> = async () => {}) {
const received: any[] = []
for await (const msg of iterator) {
received.push(msg.getParsedContent())
await fn({
msg, iterator, received,
})
}
return received
}
export function getTestSetTimeout(): (...args: Parameters<typeof setTimeout>) => ReturnType<typeof setTimeout> {
const addAfter = addAfterFn()
return (...args: Parameters<typeof setTimeout>) => {
const t = setTimeout(...args)
addAfter(() => {
clearTimeout(t)
})
return t
}
}
export function addAfterFn() {
const afterFns: any[] = []
afterEach(async () => {
const fns = afterFns.slice()
afterFns.length = 0
// @ts-expect-error
AggregatedError.throwAllSettled(await Promise.allSettled(fns.map((fn) => fn())))
})
return (fn: any) => {
afterFns.push(fn)
}
}
export const Msg = (opts?: any) => ({
value: uid('msg'),
...opts,
})
function defaultMessageMatchFn(msgTarget: any, msgGot: any) {
if (msgTarget.streamMessage.signature) {
// compare signatures by default
return msgTarget.streamMessage.signature === msgGot.signature
}
return JSON.stringify(msgGot.content) === JSON.stringify(msgTarget.streamMessage.getParsedContent())
}
export function getWaitForStorage(client: StreamrClient, defaultOpts = {}) {
/* eslint-disable no-await-in-loop */
return async (publishRequest: any, opts = {}) => {
const {
streamId,
streamPartition = 0,
interval = 500,
timeout = 10000,
count = 100,
messageMatchFn = defaultMessageMatchFn
} = validateOptions({
...defaultOpts,
...opts,
})
if (!publishRequest && !publishRequest.streamMessage) {
throw new Error(`should check against publish request for comparison, got: ${inspect(publishRequest)}`)
}
const start = Date.now()
let last: any
// eslint-disable-next-line no-constant-condition
let found = false
while (!found) {
const duration = Date.now() - start
if (duration > timeout) {
client.debug('waitForStorage timeout %o', {
timeout,
duration
}, {
publishRequest,
last: last!.map((l: any) => l.content),
})
const err: any = new Error(`timed out after ${duration}ms waiting for message: ${inspect(publishRequest)}`)
err.publishRequest = publishRequest
throw err
}
last = await client.getStreamLast({
// @ts-expect-error
streamId,
streamPartition,
count,
})
for (const lastMsg of last) {
if (messageMatchFn(publishRequest, lastMsg)) {
found = true
return
}
}
client.debug('message not found, retrying... %o', {
msg: publishRequest.streamMessage.getParsedContent(),
last: last.map(({ content }: any) => content)
})
await wait(interval)
}
}
/* eslint-enable no-await-in-loop */
}
export type CreateMessageOpts = {
/** index of message in total */
index: number,
/** batch number */
batch: number,
/** index of message in batch */
batchIndex: number,
/** total messages */
total: number
}
export type PublishOpts = {
testName: string,
delay: number
timeout: number
/** set false to allow gc message content */
retainMessages: boolean,
waitForLast: boolean
waitForLastCount: number
waitForLastTimeout: number
beforeEach: (m: any) => any
afterEach: (msg: any, request: PublishRequest) => Promise<void> | void
timestamp: number | (() => number)
partitionKey: string
createMessage: (opts: CreateMessageOpts) => Promise<any> | any
batchSize: number
}
type PublishTestMessagesOpts = StreamPartDefinitionOptions & Partial<PublishOpts>
export function getPublishTestMessages(client: StreamrClient, defaultOptsOrStreamId: string | PublishTestMessagesOpts = {}) {
// second argument could also be streamId
let defaultOpts: PublishTestMessagesOpts
if (typeof defaultOptsOrStreamId === 'string') {
// eslint-disable-next-line no-param-reassign
defaultOpts = {
streamId: defaultOptsOrStreamId as string,
}
} else {
defaultOpts = defaultOptsOrStreamId as PublishTestMessagesOpts
}
const publishTestMessagesRaw = async (n = 4, opts: PublishTestMessagesOpts = {}) => {
const id = 'testName' in opts ? opts.testName : uid('test')
let msgCount = 0
const {
streamId,
streamPartition = 0,
retainMessages = true,
delay = 100,
timeout = 3500,
waitForLast = false, // wait for message to hit storage
waitForLastCount,
waitForLastTimeout,
beforeEach = (m: any) => m,
afterEach = () => {},
timestamp,
partitionKey,
batchSize = 1,
createMessage = () => {
msgCount += 1
return {
test: id,
value: `${msgCount} of ${n}`
}
},
} = validateOptions<PublishTestMessagesOpts>({
...defaultOpts,
...opts,
})
let connectionDone = false
function checkDone() {
if (connectionDone) {
throw new Error('Connection done before finished publishing')
}
}
const onDone = () => {
connectionDone = true
}
try {
client.connection.once('done', onDone)
// async queue to ensure messages set up in order
const setupMessage = pLimitFn(async (publishOpts) => {
const message = createMessage(publishOpts)
await beforeEach(message)
return message
})
const publishMessage = async (publishOpts: CreateMessageOpts) => {
if (connectionDone) { return }
const message = await setupMessage(publishOpts)
if (connectionDone) { return }
const { index } = publishOpts
const request = await pTimeout(client.publish(
{ streamId, streamPartition },
message,
typeof timestamp === 'function' ? timestamp() : timestamp,
partitionKey
), timeout, `publish timeout ${streamId}: ${index} ${inspect(message, {
maxStringLength: 256,
})}`).catch((err) => {
if (connectionDone && err.message.includes('Needs connection')) {
// ignore connection closed error
return
}
throw err
})
if (!retainMessages) {
// only keep last message (for waitForLast)
published.length = 0
}
published.push([
message,
// @ts-expect-error
request,
])
if (connectionDone) { return }
await afterEach(message, request as PublishRequest)
checkDone()
await wait(delay) // ensure timestamp increments for reliable resend response in test.
checkDone()
}
const published: [ message: any, request: PublishRequest ][] = []
/* eslint-disable no-await-in-loop, no-loop-func */
const batchTasks: Promise<any>[] = []
let batches = 1
for (let i = 0; i < n; i++) {
if (connectionDone) {
await Promise.allSettled(batchTasks)
break
}
if (batchTasks.length < batchSize) {
client.debug('adding to batch', { i, batchTasks: batchTasks.length, batches })
// fill batch
batchTasks.push(publishMessage({
index: i,
batchIndex: batchTasks.length,
batch: batches,
total: n,
}))
}
if (batchTasks.length >= batchSize || i >= n) {
// batch is full, or finished all messages
// wait for tasks
const tasks = batchTasks.slice()
batchTasks.length = 0
batches += 1
client.debug('executing batch', { i, batchTasks: tasks.length, batches })
await Promise.allSettled(tasks)
await Promise.all(tasks)
}
}
/* eslint-enable no-await-in-loop, no-loop-func */
checkDone()
if (waitForLast) {
const msg = published[published.length - 1][1]
await getWaitForStorage(client)(msg, {
streamId,
streamPartition,
timeout: waitForLastTimeout,
count: waitForLastCount,
messageMatchFn(m: any, b: any) {
checkDone()
return m.streamMessage.signature === b.signature
}
})
}
return published
} finally {
client.connection.off('done', onDone)
}
}
const publishTestMessages = async (...args: any[]) => {
const published = await publishTestMessagesRaw(...args)
return published.map(([msg]) => msg)
}
publishTestMessages.raw = publishTestMessagesRaw
return publishTestMessages
}
export const createMockAddress = () => '0x000000000000000000000000000' + Date.now()
export const createClient = (providerSidechain?: providers.JsonRpcProvider) => {
const wallet = new Wallet(`0x100000000000000000000000000000000000000012300000001${Date.now()}`, providerSidechain)
return new StreamrClient({
...config.clientOptions,
auth: {
privateKey: wallet.privateKey
}
})
}
export const expectInvalidAddress = (operation: () => Promise<any>) => {
return expect(() => operation()).rejects.toThrow('invalid address')
}
// eslint-disable-next-line no-undef
const getTestName = (module: NodeModule) => {
const fileNamePattern = new RegExp('.*/(.*).test\\...')
const groups = module.filename.match(fileNamePattern)
return (groups !== null) ? groups[1] : module.filename
}
const randomTestRunId = crypto.randomBytes(4).toString('hex')
// eslint-disable-next-line no-undef
export const createRelativeTestStreamId = (module: NodeModule, suffix?: string) => {
return counterId(`/test/${randomTestRunId}/${getTestName(module)}${(suffix !== undefined) ? '-' + suffix : ''}`, '-')
}
// eslint-disable-next-line no-undef
export const createTestStream = (streamrClient: StreamrClient, module: NodeModule, props?: Partial<StreamProperties>) => {
return streamrClient.createStream({
id: createRelativeTestStreamId(module),
...props
})
}
/**
* Write a heap snapshot file if WRITE_SNAPSHOTS env var is set.
*/
export function snapshot() {
if (!process.env.WRITE_SNAPSHOTS) { return '' }
testDebugRoot('heap snapshot >>')
const value = writeHeapSnapshot()
testDebugRoot('heap snapshot <<', value)
return value
}
const testUtilsCounter = CounterId('test/utils')
export class LeaksDetector {
leakDetectors: Map<string, LeakDetector> = new Map()
private counter = CounterId(testUtilsCounter(this.constructor.name))
add(name: string, obj: any) {
this.leakDetectors.set(this.counter(name), new LeakDetector(obj))
}
async getLeaks(): Promise<string[]> {
const results = await Promise.all([...this.leakDetectors.entries()].map(async ([key, d]) => {
const isLeaking = await d.isLeaking()
return isLeaking ? key : undefined
}))
return results.filter((key) => key != null) as string[]
}
async ch | {
const leaks = await this.getLeaks()
if (leaks.length) {
throw new Error(format('Leaking %d of %d items: %o', leaks.length, this.leakDetectors.size, leaks))
}
}
clear() {
this.leakDetectors.clear()
}
}
| eckNoLeaks() | identifier_name |
utils.ts | import crypto from 'crypto'
import { writeHeapSnapshot } from 'v8'
import { wait } from 'streamr-test-utils'
import { providers, Wallet } from 'ethers'
import { PublishRequest } from 'streamr-client-protocol'
import LeakDetector from 'jest-leak-detector'
import { pTimeout, counterId, CounterId, AggregatedError, pLimitFn } from '../src/utils'
import { Debug, inspect, format } from '../src/utils/log'
import { MaybeAsync } from '../src/types'
import { validateOptions } from '../src/stream/utils'
import type { StreamPartDefinitionOptions, StreamProperties } from '../src/stream'
import { StreamrClient } from '../src/StreamrClient'
import config from './integration/config'
const testDebugRoot = Debug('test')
const testDebug = testDebugRoot.extend.bind(testDebugRoot)
export {
testDebug as Debug
}
export const uid = (prefix?: string) => counterId(`p${process.pid}${prefix ? '-' + prefix : ''}`)
export function fakePrivateKey() {
return crypto.randomBytes(32).toString('hex')
}
export function fakeAddress() {
return crypto.randomBytes(32).toString('hex').slice(0, 40)
}
const TEST_REPEATS = (process.env.TEST_REPEATS) ? parseInt(process.env.TEST_REPEATS, 10) : 1
export function describeRepeats(msg: any, fn: any, describeFn = describe) {
for (let k = 0; k < TEST_REPEATS; k++) {
// eslint-disable-next-line no-loop-func
describe(msg, () => {
describeFn(`test repeat ${k + 1} of ${TEST_REPEATS}`, fn)
})
}
}
describeRepeats.skip = (msg: any, fn: any) => {
describe.skip(`${msg} – test repeat ALL of ${TEST_REPEATS}`, fn)
}
describeRepeats.only = (msg: any, fn: any) => {
describeRepeats(msg, fn, describe.only)
}
export async function collect(iterator: any, fn: MaybeAsync<(item: any) => void> = async () => {}) {
const received: any[] = []
for await (const msg of iterator) {
received.push(msg.getParsedContent())
await fn({
msg, iterator, received,
})
}
return received
}
export function getTestSetTimeout(): (...args: Parameters<typeof setTimeout>) => ReturnType<typeof setTimeout> {
const addAfter = addAfterFn()
return (...args: Parameters<typeof setTimeout>) => {
const t = setTimeout(...args)
addAfter(() => {
clearTimeout(t)
})
return t
}
}
export function addAfterFn() {
const afterFns: any[] = []
afterEach(async () => {
const fns = afterFns.slice()
afterFns.length = 0
// @ts-expect-error
AggregatedError.throwAllSettled(await Promise.allSettled(fns.map((fn) => fn())))
})
return (fn: any) => {
afterFns.push(fn)
}
}
export const Msg = (opts?: any) => ({
value: uid('msg'),
...opts,
})
function defaultMessageMatchFn(msgTarget: any, msgGot: any) {
if (msgTarget.streamMessage.signature) {
// compare signatures by default
return msgTarget.streamMessage.signature === msgGot.signature
}
return JSON.stringify(msgGot.content) === JSON.stringify(msgTarget.streamMessage.getParsedContent())
}
export function getWaitForStorage(client: StreamrClient, defaultOpts = {}) {
/* eslint-disable no-await-in-loop */
return async (publishRequest: any, opts = {}) => {
const {
streamId,
streamPartition = 0,
interval = 500,
timeout = 10000,
count = 100,
messageMatchFn = defaultMessageMatchFn
} = validateOptions({
...defaultOpts,
...opts,
})
if (!publishRequest && !publishRequest.streamMessage) {
throw new Error(`should check against publish request for comparison, got: ${inspect(publishRequest)}`)
}
const start = Date.now()
let last: any
// eslint-disable-next-line no-constant-condition
let found = false
while (!found) {
const duration = Date.now() - start
if (duration > timeout) {
client.debug('waitForStorage timeout %o', {
timeout,
duration
}, {
publishRequest,
last: last!.map((l: any) => l.content),
})
const err: any = new Error(`timed out after ${duration}ms waiting for message: ${inspect(publishRequest)}`)
err.publishRequest = publishRequest
throw err
}
last = await client.getStreamLast({
// @ts-expect-error
streamId,
streamPartition,
count,
})
for (const lastMsg of last) {
if (messageMatchFn(publishRequest, lastMsg)) {
found = true
return
}
}
client.debug('message not found, retrying... %o', {
msg: publishRequest.streamMessage.getParsedContent(),
last: last.map(({ content }: any) => content)
})
await wait(interval)
}
}
/* eslint-enable no-await-in-loop */
}
export type CreateMessageOpts = {
/** index of message in total */
index: number,
/** batch number */
batch: number,
/** index of message in batch */
batchIndex: number,
/** total messages */
total: number
}
export type PublishOpts = {
testName: string,
delay: number
timeout: number
/** set false to allow gc message content */
retainMessages: boolean,
waitForLast: boolean
waitForLastCount: number
waitForLastTimeout: number
beforeEach: (m: any) => any
afterEach: (msg: any, request: PublishRequest) => Promise<void> | void
timestamp: number | (() => number)
partitionKey: string
createMessage: (opts: CreateMessageOpts) => Promise<any> | any
batchSize: number
}
type PublishTestMessagesOpts = StreamPartDefinitionOptions & Partial<PublishOpts>
export function getPublishTestMessages(client: StreamrClient, defaultOptsOrStreamId: string | PublishTestMessagesOpts = {}) {
// second argument could also be streamId
let defaultOpts: PublishTestMessagesOpts
if (typeof defaultOptsOrStreamId === 'string') {
// eslint-disable-next-line no-param-reassign
defaultOpts = {
streamId: defaultOptsOrStreamId as string,
}
} else {
defaultOpts = defaultOptsOrStreamId as PublishTestMessagesOpts
}
const publishTestMessagesRaw = async (n = 4, opts: PublishTestMessagesOpts = {}) => {
const id = 'testName' in opts ? opts.testName : uid('test')
let msgCount = 0
const {
streamId,
streamPartition = 0,
retainMessages = true,
delay = 100,
timeout = 3500,
waitForLast = false, // wait for message to hit storage
waitForLastCount,
waitForLastTimeout,
beforeEach = (m: any) => m,
afterEach = () => {},
timestamp,
partitionKey,
batchSize = 1,
createMessage = () => {
msgCount += 1
return {
test: id,
value: `${msgCount} of ${n}`
}
},
} = validateOptions<PublishTestMessagesOpts>({
...defaultOpts,
...opts,
})
let connectionDone = false
function checkDone() {
if (connectionDone) {
throw new Error('Connection done before finished publishing')
}
}
const onDone = () => {
connectionDone = true
}
try {
client.connection.once('done', onDone)
// async queue to ensure messages set up in order
const setupMessage = pLimitFn(async (publishOpts) => {
const message = createMessage(publishOpts)
await beforeEach(message)
return message
})
const publishMessage = async (publishOpts: CreateMessageOpts) => {
if (connectionDone) { return }
const message = await setupMessage(publishOpts)
if (connectionDone) { return }
const { index } = publishOpts
const request = await pTimeout(client.publish(
{ streamId, streamPartition },
message,
typeof timestamp === 'function' ? timestamp() : timestamp,
partitionKey
), timeout, `publish timeout ${streamId}: ${index} ${inspect(message, {
maxStringLength: 256,
})}`).catch((err) => {
if (connectionDone && err.message.includes('Needs connection')) {
// ignore connection closed error
return
}
throw err
})
if (!retainMessages) {
// only keep last message (for waitForLast)
published.length = 0
}
published.push([
message,
// @ts-expect-error
request,
])
if (connectionDone) { return }
await afterEach(message, request as PublishRequest)
checkDone()
await wait(delay) // ensure timestamp increments for reliable resend response in test.
checkDone()
}
const published: [ message: any, request: PublishRequest ][] = []
/* eslint-disable no-await-in-loop, no-loop-func */
const batchTasks: Promise<any>[] = []
let batches = 1
for (let i = 0; i < n; i++) {
if (connectionDone) {
await Promise.allSettled(batchTasks)
break
}
if (batchTasks.length < batchSize) {
client.debug('adding to batch', { i, batchTasks: batchTasks.length, batches })
// fill batch
batchTasks.push(publishMessage({
index: i,
batchIndex: batchTasks.length,
batch: batches,
total: n,
}))
}
if (batchTasks.length >= batchSize || i >= n) {
// batch is full, or finished all messages
// wait for tasks
const tasks = batchTasks.slice()
batchTasks.length = 0
batches += 1
client.debug('executing batch', { i, batchTasks: tasks.length, batches })
await Promise.allSettled(tasks)
await Promise.all(tasks)
}
}
/* eslint-enable no-await-in-loop, no-loop-func */
checkDone()
if (waitForLast) {
const msg = published[published.length - 1][1]
await getWaitForStorage(client)(msg, {
streamId,
streamPartition,
timeout: waitForLastTimeout,
count: waitForLastCount,
messageMatchFn(m: any, b: any) {
checkDone()
return m.streamMessage.signature === b.signature
}
})
}
return published
} finally {
client.connection.off('done', onDone)
}
}
const publishTestMessages = async (...args: any[]) => {
const published = await publishTestMessagesRaw(...args)
return published.map(([msg]) => msg)
}
publishTestMessages.raw = publishTestMessagesRaw
return publishTestMessages
}
export const createMockAddress = () => '0x000000000000000000000000000' + Date.now()
export const createClient = (providerSidechain?: providers.JsonRpcProvider) => {
const wallet = new Wallet(`0x100000000000000000000000000000000000000012300000001${Date.now()}`, providerSidechain)
return new StreamrClient({
...config.clientOptions,
auth: {
privateKey: wallet.privateKey
}
})
}
export const expectInvalidAddress = (operation: () => Promise<any>) => {
return expect(() => operation()).rejects.toThrow('invalid address')
}
// eslint-disable-next-line no-undef
const getTestName = (module: NodeModule) => {
const fileNamePattern = new RegExp('.*/(.*).test\\...')
const groups = module.filename.match(fileNamePattern)
return (groups !== null) ? groups[1] : module.filename
}
const randomTestRunId = crypto.randomBytes(4).toString('hex')
// eslint-disable-next-line no-undef
export const createRelativeTestStreamId = (module: NodeModule, suffix?: string) => {
return counterId(`/test/${randomTestRunId}/${getTestName(module)}${(suffix !== undefined) ? '-' + suffix : ''}`, '-')
}
// eslint-disable-next-line no-undef
export const createTestStream = (streamrClient: StreamrClient, module: NodeModule, props?: Partial<StreamProperties>) => {
return streamrClient.createStream({
id: createRelativeTestStreamId(module),
...props
})
}
/**
* Write a heap snapshot file if WRITE_SNAPSHOTS env var is set.
*/
export function snapshot() {
if (!process.env.WRITE_SNAPSHOTS) { | testDebugRoot('heap snapshot >>')
const value = writeHeapSnapshot()
testDebugRoot('heap snapshot <<', value)
return value
}
const testUtilsCounter = CounterId('test/utils')
export class LeaksDetector {
leakDetectors: Map<string, LeakDetector> = new Map()
private counter = CounterId(testUtilsCounter(this.constructor.name))
add(name: string, obj: any) {
this.leakDetectors.set(this.counter(name), new LeakDetector(obj))
}
async getLeaks(): Promise<string[]> {
const results = await Promise.all([...this.leakDetectors.entries()].map(async ([key, d]) => {
const isLeaking = await d.isLeaking()
return isLeaking ? key : undefined
}))
return results.filter((key) => key != null) as string[]
}
async checkNoLeaks() {
const leaks = await this.getLeaks()
if (leaks.length) {
throw new Error(format('Leaking %d of %d items: %o', leaks.length, this.leakDetectors.size, leaks))
}
}
clear() {
this.leakDetectors.clear()
}
}
| return '' }
| conditional_block |
ask_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: ask_plan.proto
/*
Package sonm is a generated protocol buffer package.
It is generated from these files:
ask_plan.proto
benchmarks.proto
bigint.proto
capabilities.proto
container.proto
dwh.proto
insonmnia.proto
marketplace.proto
net.proto
node.proto
relay.proto
rendezvous.proto
timestamp.proto
volume.proto
worker.proto
It has these top-level messages:
AskPlanCPU
AskPlanGPU
AskPlanRAM
AskPlanStorage
AskPlanNetwork
AskPlanResources
AskPlan
Benchmark
BigInt
CPUDevice
CPU
RAMDevice
RAM
GPUDevice
GPU
Network
StorageDevice
Storage
NetworkSpec
Container
SortingOption
DealsRequest
DWHDealsReply
DWHDeal
DealConditionsRequest
DealConditionsReply
OrdersRequest
MatchingOrdersRequest
DWHOrdersReply
DWHOrder
DealCondition
DWHWorker
ProfilesRequest
ProfilesReply
Profile
BlacklistRequest
BlacklistReply
ValidatorsRequest
ValidatorsReply
Validator
DealChangeRequestsReply
DealChangeRequest
DealPayment
WorkersRequest
WorkersReply
Certificate
MaxMinUint64
MaxMinBig
MaxMinTimestamp
CmpUint64
BlacklistQuery
Empty
ID
EthID
TaskID
Count
CPUUsage
MemoryUsage
NetworkUsage
ResourceUsage
ContainerRestartPolicy
TaskLogsRequest
TaskLogsChunk
TaskResourceRequirements
Chunk
Progress
Duration
EthAddress
DataSize
DataSizeRate
Price
GetOrdersReply
Benchmarks
Deal
Order
BidNetwork
BidResources
BidOrder
Addr
SocketAddr
Endpoints
JoinNetworkRequest
TaskListRequest
DealFinishRequest
DealsReply
OpenDealRequest
WorkerRemoveRequest
WorkerListReply
BalanceReply
HandshakeRequest
DiscoverResponse
HandshakeResponse
RelayClusterReply
RelayMetrics
NetMetrics
ConnectRequest
PublishRequest
RendezvousReply
RendezvousState
RendezvousMeeting
ResolveMetaReply
Timestamp
Volume
StartTaskRequest
WorkerJoinNetworkRequest
StartTaskReply
StatusReply
AskPlansReply
TaskListReply
DevicesReply
PullTaskRequest
DealInfoReply
TaskStatusReply
StatusMapReply
*/
package sonm
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type AskPlan_Status int32
const (
AskPlan_ACTIVE AskPlan_Status = 0
AskPlan_PENDING_DELETION AskPlan_Status = 1
)
var AskPlan_Status_name = map[int32]string{
0: "ACTIVE",
1: "PENDING_DELETION",
}
var AskPlan_Status_value = map[string]int32{
"ACTIVE": 0,
"PENDING_DELETION": 1,
}
func (x AskPlan_Status) String() string {
return proto.EnumName(AskPlan_Status_name, int32(x))
}
func (AskPlan_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} }
type AskPlanCPU struct {
CorePercents uint64 `protobuf:"varint,1,opt,name=core_percents,json=corePercents" json:"core_percents,omitempty"`
}
func (m *AskPlanCPU) Reset() { *m = AskPlanCPU{} }
func (m *AskPlanCPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanCPU) ProtoMessage() {}
func (*AskPlanCPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *AskPlanCPU) GetCorePercents() uint64 {
if m != nil {
return m.CorePercents
}
return 0
}
type AskPlanGPU struct {
Indexes []uint64 `protobuf:"varint,1,rep,packed,name=indexes" json:"indexes,omitempty"`
Hashes []string `protobuf:"bytes,2,rep,name=hashes" json:"hashes,omitempty"`
}
func (m *AskPlanGPU) Reset() { *m = AskPlanGPU{} }
func (m *AskPlanGPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanGPU) ProtoMessage() {}
func (*AskPlanGPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *AskPlanGPU) GetIndexes() []uint64 {
if m != nil {
return m.Indexes
}
return nil
}
func (m *AskPlanGPU) GetHashes() []string {
if m != nil {
return m.Hashes
}
return nil
}
type AskPlanRAM struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanRAM) Reset() { *m = AskPlanRAM{} }
func (m *AskPlanRAM) String() string { return proto.CompactTextString(m) }
func (*AskPlanRAM) ProtoMessage() {}
func (*AskPlanRAM) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *AskPlanRAM) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanStorage struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanStorage) Reset() { *m = AskPlanStorage{} }
func (m *AskPlanStorage) String() string { return proto.CompactTextString(m) }
func (*AskPlanStorage) ProtoMessage() {}
func (*AskPlanStorage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *AskPlanStorage) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanNetwork struct {
ThroughputIn *DataSizeRate `protobuf:"bytes,1,opt,name=throughputIn" json:"throughputIn,omitempty"`
ThroughputOut *DataSizeRate `protobuf:"bytes,2,opt,name=throughputOut" json:"throughputOut,omitempty"`
Overlay bool `protobuf:"varint,3,opt,name=overlay" json:"overlay,omitempty"`
Outbound bool `protobuf:"varint,4,opt,name=outbound" json:"outbound,omitempty"`
Incoming bool `protobuf:"varint,5,opt,name=incoming" json:"incoming,omitempty"`
}
func (m *AskPlanNetwork) Reset() { *m = AskPlanNetwork{} }
func (m *AskPlanNetwork) String() string { return proto.CompactTextString(m) }
func (*AskPlanNetwork) ProtoMessage() {}
func (*AskPlanNetwork) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *AskPlanNetwork) GetThroughputIn() *DataSizeRate {
if m != nil {
return m.ThroughputIn
}
return nil
}
func (m *AskPlanNetwork) GetThroughputOut() *DataSizeRate {
if m != nil {
return m.ThroughputOut
} |
func (m *AskPlanNetwork) GetOverlay() bool {
if m != nil {
return m.Overlay
}
return false
}
func (m *AskPlanNetwork) GetOutbound() bool {
if m != nil {
return m.Outbound
}
return false
}
func (m *AskPlanNetwork) GetIncoming() bool {
if m != nil {
return m.Incoming
}
return false
}
type AskPlanResources struct {
CPU *AskPlanCPU `protobuf:"bytes,1,opt,name=CPU" json:"CPU,omitempty"`
RAM *AskPlanRAM `protobuf:"bytes,2,opt,name=RAM" json:"RAM,omitempty"`
Storage *AskPlanStorage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
GPU *AskPlanGPU `protobuf:"bytes,4,opt,name=GPU" json:"GPU,omitempty"`
Network *AskPlanNetwork `protobuf:"bytes,5,opt,name=network" json:"network,omitempty"`
}
func (m *AskPlanResources) Reset() { *m = AskPlanResources{} }
func (m *AskPlanResources) String() string { return proto.CompactTextString(m) }
func (*AskPlanResources) ProtoMessage() {}
func (*AskPlanResources) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *AskPlanResources) GetCPU() *AskPlanCPU {
if m != nil {
return m.CPU
}
return nil
}
func (m *AskPlanResources) GetRAM() *AskPlanRAM {
if m != nil {
return m.RAM
}
return nil
}
func (m *AskPlanResources) GetStorage() *AskPlanStorage {
if m != nil {
return m.Storage
}
return nil
}
func (m *AskPlanResources) GetGPU() *AskPlanGPU {
if m != nil {
return m.GPU
}
return nil
}
func (m *AskPlanResources) GetNetwork() *AskPlanNetwork {
if m != nil {
return m.Network
}
return nil
}
type AskPlan struct {
ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"`
OrderID *BigInt `protobuf:"bytes,2,opt,name=orderID" json:"orderID,omitempty"`
DealID *BigInt `protobuf:"bytes,3,opt,name=dealID" json:"dealID,omitempty"`
Duration *Duration `protobuf:"bytes,4,opt,name=duration" json:"duration,omitempty"`
Price *Price `protobuf:"bytes,5,opt,name=price" json:"price,omitempty"`
Blacklist *EthAddress `protobuf:"bytes,6,opt,name=blacklist" json:"blacklist,omitempty"`
Counterparty *EthAddress `protobuf:"bytes,7,opt,name=counterparty" json:"counterparty,omitempty"`
Identity IdentityLevel `protobuf:"varint,8,opt,name=identity,enum=sonm.IdentityLevel" json:"identity,omitempty"`
Tag []byte `protobuf:"bytes,9,opt,name=tag,proto3" json:"tag,omitempty"`
Resources *AskPlanResources `protobuf:"bytes,10,opt,name=resources" json:"resources,omitempty"`
Status AskPlan_Status `protobuf:"varint,11,opt,name=status,enum=sonm.AskPlan_Status" json:"status,omitempty"`
}
func (m *AskPlan) Reset() { *m = AskPlan{} }
func (m *AskPlan) String() string { return proto.CompactTextString(m) }
func (*AskPlan) ProtoMessage() {}
func (*AskPlan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *AskPlan) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *AskPlan) GetOrderID() *BigInt {
if m != nil {
return m.OrderID
}
return nil
}
func (m *AskPlan) GetDealID() *BigInt {
if m != nil {
return m.DealID
}
return nil
}
func (m *AskPlan) GetDuration() *Duration {
if m != nil {
return m.Duration
}
return nil
}
func (m *AskPlan) GetPrice() *Price {
if m != nil {
return m.Price
}
return nil
}
func (m *AskPlan) GetBlacklist() *EthAddress {
if m != nil {
return m.Blacklist
}
return nil
}
func (m *AskPlan) GetCounterparty() *EthAddress {
if m != nil {
return m.Counterparty
}
return nil
}
func (m *AskPlan) GetIdentity() IdentityLevel {
if m != nil {
return m.Identity
}
return IdentityLevel_ANONYMOUS
}
func (m *AskPlan) GetTag() []byte {
if m != nil {
return m.Tag
}
return nil
}
func (m *AskPlan) GetResources() *AskPlanResources {
if m != nil {
return m.Resources
}
return nil
}
func (m *AskPlan) GetStatus() AskPlan_Status {
if m != nil {
return m.Status
}
return AskPlan_ACTIVE
}
func init() {
proto.RegisterType((*AskPlanCPU)(nil), "sonm.AskPlanCPU")
proto.RegisterType((*AskPlanGPU)(nil), "sonm.AskPlanGPU")
proto.RegisterType((*AskPlanRAM)(nil), "sonm.AskPlanRAM")
proto.RegisterType((*AskPlanStorage)(nil), "sonm.AskPlanStorage")
proto.RegisterType((*AskPlanNetwork)(nil), "sonm.AskPlanNetwork")
proto.RegisterType((*AskPlanResources)(nil), "sonm.AskPlanResources")
proto.RegisterType((*AskPlan)(nil), "sonm.AskPlan")
proto.RegisterEnum("sonm.AskPlan_Status", AskPlan_Status_name, AskPlan_Status_value)
}
func init() { proto.RegisterFile("ask_plan.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 632 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x6a, 0xdb, 0x30,
0x14, 0x9e, 0x93, 0x34, 0x3f, 0x27, 0x69, 0x96, 0x69, 0xa5, 0x98, 0x5e, 0x65, 0xde, 0x18, 0xa1,
0x8c, 0x6c, 0xeb, 0xca, 0xd8, 0xd5, 0x20, 0xab, 0x43, 0x30, 0xb4, 0xa9, 0x51, 0x9b, 0xdd, 0x16,
0xc5, 0x16, 0x89, 0x88, 0x2b, 0x19, 0x49, 0xee, 0xd6, 0x3e, 0xe7, 0xae, 0xf7, 0x0a, 0x7b, 0x85,
0x21, 0x5b, 0x4e, 0xe6, 0x42, 0x61, 0x77, 0x3e, 0xdf, 0xcf, 0xf9, 0xd3, 0xc1, 0xd0, 0x27, 0x6a,
0x73, 0x93, 0x26, 0x84, 0x8f, 0x53, 0x29, 0xb4, 0x40, 0x0d, 0x25, 0xf8, 0xed, 0x51, 0x6f, 0xc9,
0x56, 0x8c, 0xeb, 0x02, 0x3b, 0x7a, 0xce, 0xb8, 0x41, 0x39, 0x23, 0x16, 0x78, 0x71, 0x4b, 0xe4,
0x86, 0xea, 0x34, 0x21, 0x11, 0x2d, 0x20, 0xef, 0x23, 0xc0, 0x44, 0x6d, 0xc2, 0x84, 0xf0, 0xb3,
0x70, 0x81, 0x5e, 0xc3, 0x7e, 0x24, 0x24, 0xbd, 0x49, 0xa9, 0x8c, 0x28, 0xd7, 0xca, 0x75, 0x86,
0xce, 0xa8, 0x81, 0x7b, 0x06, 0x0c, 0x2d, 0xe6, 0x7d, 0xdd, 0x5a, 0x66, 0xe1, 0x02, 0xb9, 0xd0,
0x62, 0x3c, 0xa6, 0x3f, 0xa9, 0x11, 0xd7, 0x47, 0x0d, 0x5c, 0x86, 0xe8, 0x10, 0x9a, 0x6b, 0xa2,
0xd6, 0x54, 0xb9, 0xb5, 0x61, 0x7d, 0xd4, 0xc1, 0x36, 0xf2, 0x3e, 0x6c, 0xfd, 0x78, 0x72, 0x81,
0x3c, 0x68, 0x28, 0xf6, 0x40, 0xf3, 0x4a, 0xdd, 0x93, 0xfe, 0xd8, 0x74, 0x3c, 0xf6, 0x89, 0x26,
0x57, 0xec, 0x81, 0xe2, 0x9c, 0xf3, 0x4e, 0xa1, 0x6f, 0x1d, 0x57, 0x5a, 0x48, 0xb2, 0xa2, 0xff,
0xe5, 0xfa, 0xe5, 0x6c, 0x6d, 0x73, 0xaa, 0x7f, 0x08, 0xb9, 0x41, 0x9f, 0xa1, 0xa7, 0xd7, 0x52,
0x64, 0xab, 0x75, 0x9a, 0xe9, 0x80, 0x5b, 0x3b, 0x7a, 0x64, 0x27, 0x9a, 0xe2, 0x8a, 0x0e, 0x7d,
0x81, 0xfd, 0x5d, 0x7c, 0x99, 0x69, 0xb7, 0xf6, 0xa4, 0xb1, 0x2a, 0x34, 0xeb, 0x11, 0x77, 0x54,
0x26, 0xe4, 0xde, 0xad, 0x0f, 0x9d, 0x51, 0x1b, 0x97, 0x21, 0x3a, 0x82, 0xb6, 0xc8, 0xf4, 0x52,
0x64, 0x3c, 0x76, 0x1b, 0x39, 0xb5, 0x8d, 0x0d, 0xc7, 0x78, 0x24, 0x6e, 0x19, 0x5f, 0xb9, 0x7b,
0x05, 0x57, 0xc6, 0xde, 0x6f, 0x07, 0x06, 0xe5, 0xfe, 0xa8, 0x12, 0x99, 0x8c, 0xa8, 0x42, 0x1e,
0xd4, 0xcf, 0xc2, 0x85, 0x9d, 0x67, 0x50, 0xb4, 0xb5, 0x7b, 0x57, 0x6c, 0x48, 0xa3, 0xc1, 0x93,
0x0b, 0xdb, 0x7a, 0x55, 0x83, 0x27, 0x17, 0xd8, 0x90, 0x68, 0x0c, 0x2d, 0x55, 0xac, 0x38, 0x6f,
0xb7, 0x7b, 0x72, 0x50, 0xd1, 0xd9, 0xf5, 0xe3, 0x52, 0x64, 0x72, 0xce, 0xc2, 0x45, 0xde, 0xff,
0xe3, 0x9c, 0x33, 0x53, 0xd7, 0x5c, 0xc8, 0x18, 0x5a, 0xbc, 0xd8, 0x7f, 0x3e, 0xcb, 0xe3, 0x9c,
0xf6, 0x6d, 0x70, 0x29, 0xf2, 0xfe, 0xd4, 0xa1, 0x65, 0x39, 0xd4, 0x87, 0x5a, 0xe0, 0xe7, 0x63,
0x75, 0x70, 0x2d, 0xf0, 0xd1, 0x5b, 0x68, 0x09, 0x19, 0x53, 0x19, 0xf8, 0x76, 0x8e, 0x5e, 0x91,
0xeb, 0x1b, 0x5b, 0x05, 0x5c, 0xe3, 0x92, 0x44, 0x6f, 0xa0, 0x19, 0x53, 0x92, 0x04, 0xbe, 0x1d,
0xa3, 0x2a, 0xb3, 0x1c, 0x3a, 0x86, 0x76, 0x9c, 0x49, 0xa2, 0x99, 0xe0, 0x76, 0x84, 0xf2, 0x92,
0x2c, 0x8a, 0xb7, 0x3c, 0x7a, 0x05, 0x7b, 0xa9, 0x64, 0x11, 0xb5, 0x33, 0x74, 0x0b, 0x61, 0x68,
0x20, 0x5c, 0x30, 0x68, 0x0c, 0x9d, 0x65, 0x42, 0xa2, 0x4d, 0xc2, 0x94, 0x76, 0x9b, 0xff, 0xae,
0x64, 0xaa, 0xd7, 0x93, 0x38, 0x96, 0x54, 0x29, 0xbc, 0x93, 0xa0, 0x53, 0xe8, 0x45, 0x22, 0xe3,
0x9a, 0xca, 0x94, 0x48, 0x7d, 0xef, 0xb6, 0x9e, 0xb0, 0x54, 0x54, 0xe8, 0x3d, 0xb4, 0x59, 0x4c,
0xb9, 0x66, 0xfa, 0xde, 0x6d, 0x0f, 0x9d, 0x51, 0xff, 0xe4, 0x65, 0xe1, 0x08, 0x2c, 0x7a, 0x4e,
0xef, 0x68, 0x82, 0xb7, 0x22, 0x34, 0x80, 0xba, 0x26, 0x2b, 0xb7, 0x33, 0x74, 0x46, 0x3d, 0x6c,
0x3e, 0xd1, 0x29, 0x74, 0x64, 0x79, 0x3a, 0x2e, 0xe4, 0x55, 0x0f, 0xab, 0xf7, 0x50, 0xb2, 0x78,
0x27, 0x44, 0xef, 0xa0, 0xa9, 0x34, 0xd1, 0x99, 0x72, 0xbb, 0x79, 0xd9, 0xea, 0x33, 0x8e, 0xaf,
0x72, 0x0e, 0x5b, 0x8d, 0x77, 0x0c, 0xcd, 0x02, 0x41, 0x00, 0xcd, 0xc9, 0xd9, 0x75, 0xf0, 0x7d,
0x3a, 0x78, 0x86, 0x0e, 0x60, 0x10, 0x4e, 0xe7, 0x7e, 0x30, 0x9f, 0xdd, 0xf8, 0xd3, 0xf3, 0xe9,
0x75, 0x70, 0x39, 0x1f, 0x38, 0xcb, 0x66, 0xfe, 0x2f, 0xfa, 0xf4, 0x37, 0x00, 0x00, 0xff, 0xff,
0xbd, 0xd9, 0x4e, 0xf0, 0xd5, 0x04, 0x00, 0x00,
} | return nil
} | random_line_split |
ask_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: ask_plan.proto
/*
Package sonm is a generated protocol buffer package.
It is generated from these files:
ask_plan.proto
benchmarks.proto
bigint.proto
capabilities.proto
container.proto
dwh.proto
insonmnia.proto
marketplace.proto
net.proto
node.proto
relay.proto
rendezvous.proto
timestamp.proto
volume.proto
worker.proto
It has these top-level messages:
AskPlanCPU
AskPlanGPU
AskPlanRAM
AskPlanStorage
AskPlanNetwork
AskPlanResources
AskPlan
Benchmark
BigInt
CPUDevice
CPU
RAMDevice
RAM
GPUDevice
GPU
Network
StorageDevice
Storage
NetworkSpec
Container
SortingOption
DealsRequest
DWHDealsReply
DWHDeal
DealConditionsRequest
DealConditionsReply
OrdersRequest
MatchingOrdersRequest
DWHOrdersReply
DWHOrder
DealCondition
DWHWorker
ProfilesRequest
ProfilesReply
Profile
BlacklistRequest
BlacklistReply
ValidatorsRequest
ValidatorsReply
Validator
DealChangeRequestsReply
DealChangeRequest
DealPayment
WorkersRequest
WorkersReply
Certificate
MaxMinUint64
MaxMinBig
MaxMinTimestamp
CmpUint64
BlacklistQuery
Empty
ID
EthID
TaskID
Count
CPUUsage
MemoryUsage
NetworkUsage
ResourceUsage
ContainerRestartPolicy
TaskLogsRequest
TaskLogsChunk
TaskResourceRequirements
Chunk
Progress
Duration
EthAddress
DataSize
DataSizeRate
Price
GetOrdersReply
Benchmarks
Deal
Order
BidNetwork
BidResources
BidOrder
Addr
SocketAddr
Endpoints
JoinNetworkRequest
TaskListRequest
DealFinishRequest
DealsReply
OpenDealRequest
WorkerRemoveRequest
WorkerListReply
BalanceReply
HandshakeRequest
DiscoverResponse
HandshakeResponse
RelayClusterReply
RelayMetrics
NetMetrics
ConnectRequest
PublishRequest
RendezvousReply
RendezvousState
RendezvousMeeting
ResolveMetaReply
Timestamp
Volume
StartTaskRequest
WorkerJoinNetworkRequest
StartTaskReply
StatusReply
AskPlansReply
TaskListReply
DevicesReply
PullTaskRequest
DealInfoReply
TaskStatusReply
StatusMapReply
*/
package sonm
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type AskPlan_Status int32
const (
AskPlan_ACTIVE AskPlan_Status = 0
AskPlan_PENDING_DELETION AskPlan_Status = 1
)
var AskPlan_Status_name = map[int32]string{
0: "ACTIVE",
1: "PENDING_DELETION",
}
var AskPlan_Status_value = map[string]int32{
"ACTIVE": 0,
"PENDING_DELETION": 1,
}
func (x AskPlan_Status) String() string {
return proto.EnumName(AskPlan_Status_name, int32(x))
}
func (AskPlan_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} }
type AskPlanCPU struct {
CorePercents uint64 `protobuf:"varint,1,opt,name=core_percents,json=corePercents" json:"core_percents,omitempty"`
}
func (m *AskPlanCPU) Reset() { *m = AskPlanCPU{} }
func (m *AskPlanCPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanCPU) ProtoMessage() {}
func (*AskPlanCPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *AskPlanCPU) GetCorePercents() uint64 {
if m != nil {
return m.CorePercents
}
return 0
}
type AskPlanGPU struct {
Indexes []uint64 `protobuf:"varint,1,rep,packed,name=indexes" json:"indexes,omitempty"`
Hashes []string `protobuf:"bytes,2,rep,name=hashes" json:"hashes,omitempty"`
}
func (m *AskPlanGPU) Reset() { *m = AskPlanGPU{} }
func (m *AskPlanGPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanGPU) ProtoMessage() {}
func (*AskPlanGPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *AskPlanGPU) GetIndexes() []uint64 {
if m != nil {
return m.Indexes
}
return nil
}
func (m *AskPlanGPU) GetHashes() []string {
if m != nil {
return m.Hashes
}
return nil
}
type AskPlanRAM struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanRAM) Reset() { *m = AskPlanRAM{} }
func (m *AskPlanRAM) String() string { return proto.CompactTextString(m) }
func (*AskPlanRAM) ProtoMessage() {}
func (*AskPlanRAM) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *AskPlanRAM) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanStorage struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanStorage) Reset() { *m = AskPlanStorage{} }
func (m *AskPlanStorage) String() string { return proto.CompactTextString(m) }
func (*AskPlanStorage) ProtoMessage() {}
func (*AskPlanStorage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *AskPlanStorage) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanNetwork struct {
ThroughputIn *DataSizeRate `protobuf:"bytes,1,opt,name=throughputIn" json:"throughputIn,omitempty"`
ThroughputOut *DataSizeRate `protobuf:"bytes,2,opt,name=throughputOut" json:"throughputOut,omitempty"`
Overlay bool `protobuf:"varint,3,opt,name=overlay" json:"overlay,omitempty"`
Outbound bool `protobuf:"varint,4,opt,name=outbound" json:"outbound,omitempty"`
Incoming bool `protobuf:"varint,5,opt,name=incoming" json:"incoming,omitempty"`
}
func (m *AskPlanNetwork) Reset() { *m = AskPlanNetwork{} }
func (m *AskPlanNetwork) String() string { return proto.CompactTextString(m) }
func (*AskPlanNetwork) ProtoMessage() {}
func (*AskPlanNetwork) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *AskPlanNetwork) GetThroughputIn() *DataSizeRate {
if m != nil {
return m.ThroughputIn
}
return nil
}
func (m *AskPlanNetwork) GetThroughputOut() *DataSizeRate {
if m != nil {
return m.ThroughputOut
}
return nil
}
func (m *AskPlanNetwork) GetOverlay() bool {
if m != nil {
return m.Overlay
}
return false
}
func (m *AskPlanNetwork) GetOutbound() bool {
if m != nil {
return m.Outbound
}
return false
}
func (m *AskPlanNetwork) GetIncoming() bool {
if m != nil {
return m.Incoming
}
return false
}
type AskPlanResources struct {
CPU *AskPlanCPU `protobuf:"bytes,1,opt,name=CPU" json:"CPU,omitempty"`
RAM *AskPlanRAM `protobuf:"bytes,2,opt,name=RAM" json:"RAM,omitempty"`
Storage *AskPlanStorage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
GPU *AskPlanGPU `protobuf:"bytes,4,opt,name=GPU" json:"GPU,omitempty"`
Network *AskPlanNetwork `protobuf:"bytes,5,opt,name=network" json:"network,omitempty"`
}
func (m *AskPlanResources) Reset() { *m = AskPlanResources{} }
func (m *AskPlanResources) String() string { return proto.CompactTextString(m) }
func (*AskPlanResources) ProtoMessage() {}
func (*AskPlanResources) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *AskPlanResources) GetCPU() *AskPlanCPU {
if m != nil {
return m.CPU
}
return nil
}
func (m *AskPlanResources) GetRAM() *AskPlanRAM {
if m != nil {
return m.RAM
}
return nil
}
func (m *AskPlanResources) GetStorage() *AskPlanStorage {
if m != nil {
return m.Storage
}
return nil
}
func (m *AskPlanResources) GetGPU() *AskPlanGPU {
if m != nil {
return m.GPU
}
return nil
}
func (m *AskPlanResources) GetNetwork() *AskPlanNetwork {
if m != nil {
return m.Network
}
return nil
}
type AskPlan struct {
ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"`
OrderID *BigInt `protobuf:"bytes,2,opt,name=orderID" json:"orderID,omitempty"`
DealID *BigInt `protobuf:"bytes,3,opt,name=dealID" json:"dealID,omitempty"`
Duration *Duration `protobuf:"bytes,4,opt,name=duration" json:"duration,omitempty"`
Price *Price `protobuf:"bytes,5,opt,name=price" json:"price,omitempty"`
Blacklist *EthAddress `protobuf:"bytes,6,opt,name=blacklist" json:"blacklist,omitempty"`
Counterparty *EthAddress `protobuf:"bytes,7,opt,name=counterparty" json:"counterparty,omitempty"`
Identity IdentityLevel `protobuf:"varint,8,opt,name=identity,enum=sonm.IdentityLevel" json:"identity,omitempty"`
Tag []byte `protobuf:"bytes,9,opt,name=tag,proto3" json:"tag,omitempty"`
Resources *AskPlanResources `protobuf:"bytes,10,opt,name=resources" json:"resources,omitempty"`
Status AskPlan_Status `protobuf:"varint,11,opt,name=status,enum=sonm.AskPlan_Status" json:"status,omitempty"`
}
func (m *AskPlan) Reset() { *m = AskPlan{} }
func (m *AskPlan) | () string { return proto.CompactTextString(m) }
func (*AskPlan) ProtoMessage() {}
func (*AskPlan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *AskPlan) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *AskPlan) GetOrderID() *BigInt {
if m != nil {
return m.OrderID
}
return nil
}
func (m *AskPlan) GetDealID() *BigInt {
if m != nil {
return m.DealID
}
return nil
}
func (m *AskPlan) GetDuration() *Duration {
if m != nil {
return m.Duration
}
return nil
}
func (m *AskPlan) GetPrice() *Price {
if m != nil {
return m.Price
}
return nil
}
func (m *AskPlan) GetBlacklist() *EthAddress {
if m != nil {
return m.Blacklist
}
return nil
}
func (m *AskPlan) GetCounterparty() *EthAddress {
if m != nil {
return m.Counterparty
}
return nil
}
func (m *AskPlan) GetIdentity() IdentityLevel {
if m != nil {
return m.Identity
}
return IdentityLevel_ANONYMOUS
}
func (m *AskPlan) GetTag() []byte {
if m != nil {
return m.Tag
}
return nil
}
func (m *AskPlan) GetResources() *AskPlanResources {
if m != nil {
return m.Resources
}
return nil
}
func (m *AskPlan) GetStatus() AskPlan_Status {
if m != nil {
return m.Status
}
return AskPlan_ACTIVE
}
func init() {
proto.RegisterType((*AskPlanCPU)(nil), "sonm.AskPlanCPU")
proto.RegisterType((*AskPlanGPU)(nil), "sonm.AskPlanGPU")
proto.RegisterType((*AskPlanRAM)(nil), "sonm.AskPlanRAM")
proto.RegisterType((*AskPlanStorage)(nil), "sonm.AskPlanStorage")
proto.RegisterType((*AskPlanNetwork)(nil), "sonm.AskPlanNetwork")
proto.RegisterType((*AskPlanResources)(nil), "sonm.AskPlanResources")
proto.RegisterType((*AskPlan)(nil), "sonm.AskPlan")
proto.RegisterEnum("sonm.AskPlan_Status", AskPlan_Status_name, AskPlan_Status_value)
}
func init() { proto.RegisterFile("ask_plan.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 632 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x6a, 0xdb, 0x30,
0x14, 0x9e, 0x93, 0x34, 0x3f, 0x27, 0x69, 0x96, 0x69, 0xa5, 0x98, 0x5e, 0x65, 0xde, 0x18, 0xa1,
0x8c, 0x6c, 0xeb, 0xca, 0xd8, 0xd5, 0x20, 0xab, 0x43, 0x30, 0xb4, 0xa9, 0x51, 0x9b, 0xdd, 0x16,
0xc5, 0x16, 0x89, 0x88, 0x2b, 0x19, 0x49, 0xee, 0xd6, 0x3e, 0xe7, 0xae, 0xf7, 0x0a, 0x7b, 0x85,
0x21, 0x5b, 0x4e, 0xe6, 0x42, 0x61, 0x77, 0x3e, 0xdf, 0xcf, 0xf9, 0xd3, 0xc1, 0xd0, 0x27, 0x6a,
0x73, 0x93, 0x26, 0x84, 0x8f, 0x53, 0x29, 0xb4, 0x40, 0x0d, 0x25, 0xf8, 0xed, 0x51, 0x6f, 0xc9,
0x56, 0x8c, 0xeb, 0x02, 0x3b, 0x7a, 0xce, 0xb8, 0x41, 0x39, 0x23, 0x16, 0x78, 0x71, 0x4b, 0xe4,
0x86, 0xea, 0x34, 0x21, 0x11, 0x2d, 0x20, 0xef, 0x23, 0xc0, 0x44, 0x6d, 0xc2, 0x84, 0xf0, 0xb3,
0x70, 0x81, 0x5e, 0xc3, 0x7e, 0x24, 0x24, 0xbd, 0x49, 0xa9, 0x8c, 0x28, 0xd7, 0xca, 0x75, 0x86,
0xce, 0xa8, 0x81, 0x7b, 0x06, 0x0c, 0x2d, 0xe6, 0x7d, 0xdd, 0x5a, 0x66, 0xe1, 0x02, 0xb9, 0xd0,
0x62, 0x3c, 0xa6, 0x3f, 0xa9, 0x11, 0xd7, 0x47, 0x0d, 0x5c, 0x86, 0xe8, 0x10, 0x9a, 0x6b, 0xa2,
0xd6, 0x54, 0xb9, 0xb5, 0x61, 0x7d, 0xd4, 0xc1, 0x36, 0xf2, 0x3e, 0x6c, 0xfd, 0x78, 0x72, 0x81,
0x3c, 0x68, 0x28, 0xf6, 0x40, 0xf3, 0x4a, 0xdd, 0x93, 0xfe, 0xd8, 0x74, 0x3c, 0xf6, 0x89, 0x26,
0x57, 0xec, 0x81, 0xe2, 0x9c, 0xf3, 0x4e, 0xa1, 0x6f, 0x1d, 0x57, 0x5a, 0x48, 0xb2, 0xa2, 0xff,
0xe5, 0xfa, 0xe5, 0x6c, 0x6d, 0x73, 0xaa, 0x7f, 0x08, 0xb9, 0x41, 0x9f, 0xa1, 0xa7, 0xd7, 0x52,
0x64, 0xab, 0x75, 0x9a, 0xe9, 0x80, 0x5b, 0x3b, 0x7a, 0x64, 0x27, 0x9a, 0xe2, 0x8a, 0x0e, 0x7d,
0x81, 0xfd, 0x5d, 0x7c, 0x99, 0x69, 0xb7, 0xf6, 0xa4, 0xb1, 0x2a, 0x34, 0xeb, 0x11, 0x77, 0x54,
0x26, 0xe4, 0xde, 0xad, 0x0f, 0x9d, 0x51, 0x1b, 0x97, 0x21, 0x3a, 0x82, 0xb6, 0xc8, 0xf4, 0x52,
0x64, 0x3c, 0x76, 0x1b, 0x39, 0xb5, 0x8d, 0x0d, 0xc7, 0x78, 0x24, 0x6e, 0x19, 0x5f, 0xb9, 0x7b,
0x05, 0x57, 0xc6, 0xde, 0x6f, 0x07, 0x06, 0xe5, 0xfe, 0xa8, 0x12, 0x99, 0x8c, 0xa8, 0x42, 0x1e,
0xd4, 0xcf, 0xc2, 0x85, 0x9d, 0x67, 0x50, 0xb4, 0xb5, 0x7b, 0x57, 0x6c, 0x48, 0xa3, 0xc1, 0x93,
0x0b, 0xdb, 0x7a, 0x55, 0x83, 0x27, 0x17, 0xd8, 0x90, 0x68, 0x0c, 0x2d, 0x55, 0xac, 0x38, 0x6f,
0xb7, 0x7b, 0x72, 0x50, 0xd1, 0xd9, 0xf5, 0xe3, 0x52, 0x64, 0x72, 0xce, 0xc2, 0x45, 0xde, 0xff,
0xe3, 0x9c, 0x33, 0x53, 0xd7, 0x5c, 0xc8, 0x18, 0x5a, 0xbc, 0xd8, 0x7f, 0x3e, 0xcb, 0xe3, 0x9c,
0xf6, 0x6d, 0x70, 0x29, 0xf2, 0xfe, 0xd4, 0xa1, 0x65, 0x39, 0xd4, 0x87, 0x5a, 0xe0, 0xe7, 0x63,
0x75, 0x70, 0x2d, 0xf0, 0xd1, 0x5b, 0x68, 0x09, 0x19, 0x53, 0x19, 0xf8, 0x76, 0x8e, 0x5e, 0x91,
0xeb, 0x1b, 0x5b, 0x05, 0x5c, 0xe3, 0x92, 0x44, 0x6f, 0xa0, 0x19, 0x53, 0x92, 0x04, 0xbe, 0x1d,
0xa3, 0x2a, 0xb3, 0x1c, 0x3a, 0x86, 0x76, 0x9c, 0x49, 0xa2, 0x99, 0xe0, 0x76, 0x84, 0xf2, 0x92,
0x2c, 0x8a, 0xb7, 0x3c, 0x7a, 0x05, 0x7b, 0xa9, 0x64, 0x11, 0xb5, 0x33, 0x74, 0x0b, 0x61, 0x68,
0x20, 0x5c, 0x30, 0x68, 0x0c, 0x9d, 0x65, 0x42, 0xa2, 0x4d, 0xc2, 0x94, 0x76, 0x9b, 0xff, 0xae,
0x64, 0xaa, 0xd7, 0x93, 0x38, 0x96, 0x54, 0x29, 0xbc, 0x93, 0xa0, 0x53, 0xe8, 0x45, 0x22, 0xe3,
0x9a, 0xca, 0x94, 0x48, 0x7d, 0xef, 0xb6, 0x9e, 0xb0, 0x54, 0x54, 0xe8, 0x3d, 0xb4, 0x59, 0x4c,
0xb9, 0x66, 0xfa, 0xde, 0x6d, 0x0f, 0x9d, 0x51, 0xff, 0xe4, 0x65, 0xe1, 0x08, 0x2c, 0x7a, 0x4e,
0xef, 0x68, 0x82, 0xb7, 0x22, 0x34, 0x80, 0xba, 0x26, 0x2b, 0xb7, 0x33, 0x74, 0x46, 0x3d, 0x6c,
0x3e, 0xd1, 0x29, 0x74, 0x64, 0x79, 0x3a, 0x2e, 0xe4, 0x55, 0x0f, 0xab, 0xf7, 0x50, 0xb2, 0x78,
0x27, 0x44, 0xef, 0xa0, 0xa9, 0x34, 0xd1, 0x99, 0x72, 0xbb, 0x79, 0xd9, 0xea, 0x33, 0x8e, 0xaf,
0x72, 0x0e, 0x5b, 0x8d, 0x77, 0x0c, 0xcd, 0x02, 0x41, 0x00, 0xcd, 0xc9, 0xd9, 0x75, 0xf0, 0x7d,
0x3a, 0x78, 0x86, 0x0e, 0x60, 0x10, 0x4e, 0xe7, 0x7e, 0x30, 0x9f, 0xdd, 0xf8, 0xd3, 0xf3, 0xe9,
0x75, 0x70, 0x39, 0x1f, 0x38, 0xcb, 0x66, 0xfe, 0x2f, 0xfa, 0xf4, 0x37, 0x00, 0x00, 0xff, 0xff,
0xbd, 0xd9, 0x4e, 0xf0, 0xd5, 0x04, 0x00, 0x00,
}
| String | identifier_name |
ask_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: ask_plan.proto
/*
Package sonm is a generated protocol buffer package.
It is generated from these files:
ask_plan.proto
benchmarks.proto
bigint.proto
capabilities.proto
container.proto
dwh.proto
insonmnia.proto
marketplace.proto
net.proto
node.proto
relay.proto
rendezvous.proto
timestamp.proto
volume.proto
worker.proto
It has these top-level messages:
AskPlanCPU
AskPlanGPU
AskPlanRAM
AskPlanStorage
AskPlanNetwork
AskPlanResources
AskPlan
Benchmark
BigInt
CPUDevice
CPU
RAMDevice
RAM
GPUDevice
GPU
Network
StorageDevice
Storage
NetworkSpec
Container
SortingOption
DealsRequest
DWHDealsReply
DWHDeal
DealConditionsRequest
DealConditionsReply
OrdersRequest
MatchingOrdersRequest
DWHOrdersReply
DWHOrder
DealCondition
DWHWorker
ProfilesRequest
ProfilesReply
Profile
BlacklistRequest
BlacklistReply
ValidatorsRequest
ValidatorsReply
Validator
DealChangeRequestsReply
DealChangeRequest
DealPayment
WorkersRequest
WorkersReply
Certificate
MaxMinUint64
MaxMinBig
MaxMinTimestamp
CmpUint64
BlacklistQuery
Empty
ID
EthID
TaskID
Count
CPUUsage
MemoryUsage
NetworkUsage
ResourceUsage
ContainerRestartPolicy
TaskLogsRequest
TaskLogsChunk
TaskResourceRequirements
Chunk
Progress
Duration
EthAddress
DataSize
DataSizeRate
Price
GetOrdersReply
Benchmarks
Deal
Order
BidNetwork
BidResources
BidOrder
Addr
SocketAddr
Endpoints
JoinNetworkRequest
TaskListRequest
DealFinishRequest
DealsReply
OpenDealRequest
WorkerRemoveRequest
WorkerListReply
BalanceReply
HandshakeRequest
DiscoverResponse
HandshakeResponse
RelayClusterReply
RelayMetrics
NetMetrics
ConnectRequest
PublishRequest
RendezvousReply
RendezvousState
RendezvousMeeting
ResolveMetaReply
Timestamp
Volume
StartTaskRequest
WorkerJoinNetworkRequest
StartTaskReply
StatusReply
AskPlansReply
TaskListReply
DevicesReply
PullTaskRequest
DealInfoReply
TaskStatusReply
StatusMapReply
*/
package sonm
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type AskPlan_Status int32
const (
AskPlan_ACTIVE AskPlan_Status = 0
AskPlan_PENDING_DELETION AskPlan_Status = 1
)
var AskPlan_Status_name = map[int32]string{
0: "ACTIVE",
1: "PENDING_DELETION",
}
var AskPlan_Status_value = map[string]int32{
"ACTIVE": 0,
"PENDING_DELETION": 1,
}
func (x AskPlan_Status) String() string {
return proto.EnumName(AskPlan_Status_name, int32(x))
}
func (AskPlan_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} }
type AskPlanCPU struct {
CorePercents uint64 `protobuf:"varint,1,opt,name=core_percents,json=corePercents" json:"core_percents,omitempty"`
}
func (m *AskPlanCPU) Reset() { *m = AskPlanCPU{} }
func (m *AskPlanCPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanCPU) ProtoMessage() {}
func (*AskPlanCPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *AskPlanCPU) GetCorePercents() uint64 {
if m != nil {
return m.CorePercents
}
return 0
}
type AskPlanGPU struct {
Indexes []uint64 `protobuf:"varint,1,rep,packed,name=indexes" json:"indexes,omitempty"`
Hashes []string `protobuf:"bytes,2,rep,name=hashes" json:"hashes,omitempty"`
}
func (m *AskPlanGPU) Reset() { *m = AskPlanGPU{} }
func (m *AskPlanGPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanGPU) ProtoMessage() {}
func (*AskPlanGPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *AskPlanGPU) GetIndexes() []uint64 {
if m != nil {
return m.Indexes
}
return nil
}
func (m *AskPlanGPU) GetHashes() []string {
if m != nil {
return m.Hashes
}
return nil
}
type AskPlanRAM struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanRAM) Reset() { *m = AskPlanRAM{} }
func (m *AskPlanRAM) String() string { return proto.CompactTextString(m) }
func (*AskPlanRAM) ProtoMessage() {}
func (*AskPlanRAM) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *AskPlanRAM) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanStorage struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanStorage) Reset() { *m = AskPlanStorage{} }
func (m *AskPlanStorage) String() string |
func (*AskPlanStorage) ProtoMessage() {}
func (*AskPlanStorage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *AskPlanStorage) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanNetwork struct {
ThroughputIn *DataSizeRate `protobuf:"bytes,1,opt,name=throughputIn" json:"throughputIn,omitempty"`
ThroughputOut *DataSizeRate `protobuf:"bytes,2,opt,name=throughputOut" json:"throughputOut,omitempty"`
Overlay bool `protobuf:"varint,3,opt,name=overlay" json:"overlay,omitempty"`
Outbound bool `protobuf:"varint,4,opt,name=outbound" json:"outbound,omitempty"`
Incoming bool `protobuf:"varint,5,opt,name=incoming" json:"incoming,omitempty"`
}
func (m *AskPlanNetwork) Reset() { *m = AskPlanNetwork{} }
func (m *AskPlanNetwork) String() string { return proto.CompactTextString(m) }
func (*AskPlanNetwork) ProtoMessage() {}
func (*AskPlanNetwork) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *AskPlanNetwork) GetThroughputIn() *DataSizeRate {
if m != nil {
return m.ThroughputIn
}
return nil
}
func (m *AskPlanNetwork) GetThroughputOut() *DataSizeRate {
if m != nil {
return m.ThroughputOut
}
return nil
}
func (m *AskPlanNetwork) GetOverlay() bool {
if m != nil {
return m.Overlay
}
return false
}
func (m *AskPlanNetwork) GetOutbound() bool {
if m != nil {
return m.Outbound
}
return false
}
func (m *AskPlanNetwork) GetIncoming() bool {
if m != nil {
return m.Incoming
}
return false
}
type AskPlanResources struct {
CPU *AskPlanCPU `protobuf:"bytes,1,opt,name=CPU" json:"CPU,omitempty"`
RAM *AskPlanRAM `protobuf:"bytes,2,opt,name=RAM" json:"RAM,omitempty"`
Storage *AskPlanStorage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
GPU *AskPlanGPU `protobuf:"bytes,4,opt,name=GPU" json:"GPU,omitempty"`
Network *AskPlanNetwork `protobuf:"bytes,5,opt,name=network" json:"network,omitempty"`
}
func (m *AskPlanResources) Reset() { *m = AskPlanResources{} }
func (m *AskPlanResources) String() string { return proto.CompactTextString(m) }
func (*AskPlanResources) ProtoMessage() {}
func (*AskPlanResources) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *AskPlanResources) GetCPU() *AskPlanCPU {
if m != nil {
return m.CPU
}
return nil
}
func (m *AskPlanResources) GetRAM() *AskPlanRAM {
if m != nil {
return m.RAM
}
return nil
}
func (m *AskPlanResources) GetStorage() *AskPlanStorage {
if m != nil {
return m.Storage
}
return nil
}
func (m *AskPlanResources) GetGPU() *AskPlanGPU {
if m != nil {
return m.GPU
}
return nil
}
func (m *AskPlanResources) GetNetwork() *AskPlanNetwork {
if m != nil {
return m.Network
}
return nil
}
type AskPlan struct {
ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"`
OrderID *BigInt `protobuf:"bytes,2,opt,name=orderID" json:"orderID,omitempty"`
DealID *BigInt `protobuf:"bytes,3,opt,name=dealID" json:"dealID,omitempty"`
Duration *Duration `protobuf:"bytes,4,opt,name=duration" json:"duration,omitempty"`
Price *Price `protobuf:"bytes,5,opt,name=price" json:"price,omitempty"`
Blacklist *EthAddress `protobuf:"bytes,6,opt,name=blacklist" json:"blacklist,omitempty"`
Counterparty *EthAddress `protobuf:"bytes,7,opt,name=counterparty" json:"counterparty,omitempty"`
Identity IdentityLevel `protobuf:"varint,8,opt,name=identity,enum=sonm.IdentityLevel" json:"identity,omitempty"`
Tag []byte `protobuf:"bytes,9,opt,name=tag,proto3" json:"tag,omitempty"`
Resources *AskPlanResources `protobuf:"bytes,10,opt,name=resources" json:"resources,omitempty"`
Status AskPlan_Status `protobuf:"varint,11,opt,name=status,enum=sonm.AskPlan_Status" json:"status,omitempty"`
}
func (m *AskPlan) Reset() { *m = AskPlan{} }
func (m *AskPlan) String() string { return proto.CompactTextString(m) }
func (*AskPlan) ProtoMessage() {}
func (*AskPlan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *AskPlan) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *AskPlan) GetOrderID() *BigInt {
if m != nil {
return m.OrderID
}
return nil
}
func (m *AskPlan) GetDealID() *BigInt {
if m != nil {
return m.DealID
}
return nil
}
func (m *AskPlan) GetDuration() *Duration {
if m != nil {
return m.Duration
}
return nil
}
func (m *AskPlan) GetPrice() *Price {
if m != nil {
return m.Price
}
return nil
}
func (m *AskPlan) GetBlacklist() *EthAddress {
if m != nil {
return m.Blacklist
}
return nil
}
func (m *AskPlan) GetCounterparty() *EthAddress {
if m != nil {
return m.Counterparty
}
return nil
}
func (m *AskPlan) GetIdentity() IdentityLevel {
if m != nil {
return m.Identity
}
return IdentityLevel_ANONYMOUS
}
func (m *AskPlan) GetTag() []byte {
if m != nil {
return m.Tag
}
return nil
}
func (m *AskPlan) GetResources() *AskPlanResources {
if m != nil {
return m.Resources
}
return nil
}
func (m *AskPlan) GetStatus() AskPlan_Status {
if m != nil {
return m.Status
}
return AskPlan_ACTIVE
}
func init() {
proto.RegisterType((*AskPlanCPU)(nil), "sonm.AskPlanCPU")
proto.RegisterType((*AskPlanGPU)(nil), "sonm.AskPlanGPU")
proto.RegisterType((*AskPlanRAM)(nil), "sonm.AskPlanRAM")
proto.RegisterType((*AskPlanStorage)(nil), "sonm.AskPlanStorage")
proto.RegisterType((*AskPlanNetwork)(nil), "sonm.AskPlanNetwork")
proto.RegisterType((*AskPlanResources)(nil), "sonm.AskPlanResources")
proto.RegisterType((*AskPlan)(nil), "sonm.AskPlan")
proto.RegisterEnum("sonm.AskPlan_Status", AskPlan_Status_name, AskPlan_Status_value)
}
func init() { proto.RegisterFile("ask_plan.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 632 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x6a, 0xdb, 0x30,
0x14, 0x9e, 0x93, 0x34, 0x3f, 0x27, 0x69, 0x96, 0x69, 0xa5, 0x98, 0x5e, 0x65, 0xde, 0x18, 0xa1,
0x8c, 0x6c, 0xeb, 0xca, 0xd8, 0xd5, 0x20, 0xab, 0x43, 0x30, 0xb4, 0xa9, 0x51, 0x9b, 0xdd, 0x16,
0xc5, 0x16, 0x89, 0x88, 0x2b, 0x19, 0x49, 0xee, 0xd6, 0x3e, 0xe7, 0xae, 0xf7, 0x0a, 0x7b, 0x85,
0x21, 0x5b, 0x4e, 0xe6, 0x42, 0x61, 0x77, 0x3e, 0xdf, 0xcf, 0xf9, 0xd3, 0xc1, 0xd0, 0x27, 0x6a,
0x73, 0x93, 0x26, 0x84, 0x8f, 0x53, 0x29, 0xb4, 0x40, 0x0d, 0x25, 0xf8, 0xed, 0x51, 0x6f, 0xc9,
0x56, 0x8c, 0xeb, 0x02, 0x3b, 0x7a, 0xce, 0xb8, 0x41, 0x39, 0x23, 0x16, 0x78, 0x71, 0x4b, 0xe4,
0x86, 0xea, 0x34, 0x21, 0x11, 0x2d, 0x20, 0xef, 0x23, 0xc0, 0x44, 0x6d, 0xc2, 0x84, 0xf0, 0xb3,
0x70, 0x81, 0x5e, 0xc3, 0x7e, 0x24, 0x24, 0xbd, 0x49, 0xa9, 0x8c, 0x28, 0xd7, 0xca, 0x75, 0x86,
0xce, 0xa8, 0x81, 0x7b, 0x06, 0x0c, 0x2d, 0xe6, 0x7d, 0xdd, 0x5a, 0x66, 0xe1, 0x02, 0xb9, 0xd0,
0x62, 0x3c, 0xa6, 0x3f, 0xa9, 0x11, 0xd7, 0x47, 0x0d, 0x5c, 0x86, 0xe8, 0x10, 0x9a, 0x6b, 0xa2,
0xd6, 0x54, 0xb9, 0xb5, 0x61, 0x7d, 0xd4, 0xc1, 0x36, 0xf2, 0x3e, 0x6c, 0xfd, 0x78, 0x72, 0x81,
0x3c, 0x68, 0x28, 0xf6, 0x40, 0xf3, 0x4a, 0xdd, 0x93, 0xfe, 0xd8, 0x74, 0x3c, 0xf6, 0x89, 0x26,
0x57, 0xec, 0x81, 0xe2, 0x9c, 0xf3, 0x4e, 0xa1, 0x6f, 0x1d, 0x57, 0x5a, 0x48, 0xb2, 0xa2, 0xff,
0xe5, 0xfa, 0xe5, 0x6c, 0x6d, 0x73, 0xaa, 0x7f, 0x08, 0xb9, 0x41, 0x9f, 0xa1, 0xa7, 0xd7, 0x52,
0x64, 0xab, 0x75, 0x9a, 0xe9, 0x80, 0x5b, 0x3b, 0x7a, 0x64, 0x27, 0x9a, 0xe2, 0x8a, 0x0e, 0x7d,
0x81, 0xfd, 0x5d, 0x7c, 0x99, 0x69, 0xb7, 0xf6, 0xa4, 0xb1, 0x2a, 0x34, 0xeb, 0x11, 0x77, 0x54,
0x26, 0xe4, 0xde, 0xad, 0x0f, 0x9d, 0x51, 0x1b, 0x97, 0x21, 0x3a, 0x82, 0xb6, 0xc8, 0xf4, 0x52,
0x64, 0x3c, 0x76, 0x1b, 0x39, 0xb5, 0x8d, 0x0d, 0xc7, 0x78, 0x24, 0x6e, 0x19, 0x5f, 0xb9, 0x7b,
0x05, 0x57, 0xc6, 0xde, 0x6f, 0x07, 0x06, 0xe5, 0xfe, 0xa8, 0x12, 0x99, 0x8c, 0xa8, 0x42, 0x1e,
0xd4, 0xcf, 0xc2, 0x85, 0x9d, 0x67, 0x50, 0xb4, 0xb5, 0x7b, 0x57, 0x6c, 0x48, 0xa3, 0xc1, 0x93,
0x0b, 0xdb, 0x7a, 0x55, 0x83, 0x27, 0x17, 0xd8, 0x90, 0x68, 0x0c, 0x2d, 0x55, 0xac, 0x38, 0x6f,
0xb7, 0x7b, 0x72, 0x50, 0xd1, 0xd9, 0xf5, 0xe3, 0x52, 0x64, 0x72, 0xce, 0xc2, 0x45, 0xde, 0xff,
0xe3, 0x9c, 0x33, 0x53, 0xd7, 0x5c, 0xc8, 0x18, 0x5a, 0xbc, 0xd8, 0x7f, 0x3e, 0xcb, 0xe3, 0x9c,
0xf6, 0x6d, 0x70, 0x29, 0xf2, 0xfe, 0xd4, 0xa1, 0x65, 0x39, 0xd4, 0x87, 0x5a, 0xe0, 0xe7, 0x63,
0x75, 0x70, 0x2d, 0xf0, 0xd1, 0x5b, 0x68, 0x09, 0x19, 0x53, 0x19, 0xf8, 0x76, 0x8e, 0x5e, 0x91,
0xeb, 0x1b, 0x5b, 0x05, 0x5c, 0xe3, 0x92, 0x44, 0x6f, 0xa0, 0x19, 0x53, 0x92, 0x04, 0xbe, 0x1d,
0xa3, 0x2a, 0xb3, 0x1c, 0x3a, 0x86, 0x76, 0x9c, 0x49, 0xa2, 0x99, 0xe0, 0x76, 0x84, 0xf2, 0x92,
0x2c, 0x8a, 0xb7, 0x3c, 0x7a, 0x05, 0x7b, 0xa9, 0x64, 0x11, 0xb5, 0x33, 0x74, 0x0b, 0x61, 0x68,
0x20, 0x5c, 0x30, 0x68, 0x0c, 0x9d, 0x65, 0x42, 0xa2, 0x4d, 0xc2, 0x94, 0x76, 0x9b, 0xff, 0xae,
0x64, 0xaa, 0xd7, 0x93, 0x38, 0x96, 0x54, 0x29, 0xbc, 0x93, 0xa0, 0x53, 0xe8, 0x45, 0x22, 0xe3,
0x9a, 0xca, 0x94, 0x48, 0x7d, 0xef, 0xb6, 0x9e, 0xb0, 0x54, 0x54, 0xe8, 0x3d, 0xb4, 0x59, 0x4c,
0xb9, 0x66, 0xfa, 0xde, 0x6d, 0x0f, 0x9d, 0x51, 0xff, 0xe4, 0x65, 0xe1, 0x08, 0x2c, 0x7a, 0x4e,
0xef, 0x68, 0x82, 0xb7, 0x22, 0x34, 0x80, 0xba, 0x26, 0x2b, 0xb7, 0x33, 0x74, 0x46, 0x3d, 0x6c,
0x3e, 0xd1, 0x29, 0x74, 0x64, 0x79, 0x3a, 0x2e, 0xe4, 0x55, 0x0f, 0xab, 0xf7, 0x50, 0xb2, 0x78,
0x27, 0x44, 0xef, 0xa0, 0xa9, 0x34, 0xd1, 0x99, 0x72, 0xbb, 0x79, 0xd9, 0xea, 0x33, 0x8e, 0xaf,
0x72, 0x0e, 0x5b, 0x8d, 0x77, 0x0c, 0xcd, 0x02, 0x41, 0x00, 0xcd, 0xc9, 0xd9, 0x75, 0xf0, 0x7d,
0x3a, 0x78, 0x86, 0x0e, 0x60, 0x10, 0x4e, 0xe7, 0x7e, 0x30, 0x9f, 0xdd, 0xf8, 0xd3, 0xf3, 0xe9,
0x75, 0x70, 0x39, 0x1f, 0x38, 0xcb, 0x66, 0xfe, 0x2f, 0xfa, 0xf4, 0x37, 0x00, 0x00, 0xff, 0xff,
0xbd, 0xd9, 0x4e, 0xf0, 0xd5, 0x04, 0x00, 0x00,
}
| { return proto.CompactTextString(m) } | identifier_body |
ask_plan.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: ask_plan.proto
/*
Package sonm is a generated protocol buffer package.
It is generated from these files:
ask_plan.proto
benchmarks.proto
bigint.proto
capabilities.proto
container.proto
dwh.proto
insonmnia.proto
marketplace.proto
net.proto
node.proto
relay.proto
rendezvous.proto
timestamp.proto
volume.proto
worker.proto
It has these top-level messages:
AskPlanCPU
AskPlanGPU
AskPlanRAM
AskPlanStorage
AskPlanNetwork
AskPlanResources
AskPlan
Benchmark
BigInt
CPUDevice
CPU
RAMDevice
RAM
GPUDevice
GPU
Network
StorageDevice
Storage
NetworkSpec
Container
SortingOption
DealsRequest
DWHDealsReply
DWHDeal
DealConditionsRequest
DealConditionsReply
OrdersRequest
MatchingOrdersRequest
DWHOrdersReply
DWHOrder
DealCondition
DWHWorker
ProfilesRequest
ProfilesReply
Profile
BlacklistRequest
BlacklistReply
ValidatorsRequest
ValidatorsReply
Validator
DealChangeRequestsReply
DealChangeRequest
DealPayment
WorkersRequest
WorkersReply
Certificate
MaxMinUint64
MaxMinBig
MaxMinTimestamp
CmpUint64
BlacklistQuery
Empty
ID
EthID
TaskID
Count
CPUUsage
MemoryUsage
NetworkUsage
ResourceUsage
ContainerRestartPolicy
TaskLogsRequest
TaskLogsChunk
TaskResourceRequirements
Chunk
Progress
Duration
EthAddress
DataSize
DataSizeRate
Price
GetOrdersReply
Benchmarks
Deal
Order
BidNetwork
BidResources
BidOrder
Addr
SocketAddr
Endpoints
JoinNetworkRequest
TaskListRequest
DealFinishRequest
DealsReply
OpenDealRequest
WorkerRemoveRequest
WorkerListReply
BalanceReply
HandshakeRequest
DiscoverResponse
HandshakeResponse
RelayClusterReply
RelayMetrics
NetMetrics
ConnectRequest
PublishRequest
RendezvousReply
RendezvousState
RendezvousMeeting
ResolveMetaReply
Timestamp
Volume
StartTaskRequest
WorkerJoinNetworkRequest
StartTaskReply
StatusReply
AskPlansReply
TaskListReply
DevicesReply
PullTaskRequest
DealInfoReply
TaskStatusReply
StatusMapReply
*/
package sonm
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type AskPlan_Status int32
const (
AskPlan_ACTIVE AskPlan_Status = 0
AskPlan_PENDING_DELETION AskPlan_Status = 1
)
var AskPlan_Status_name = map[int32]string{
0: "ACTIVE",
1: "PENDING_DELETION",
}
var AskPlan_Status_value = map[string]int32{
"ACTIVE": 0,
"PENDING_DELETION": 1,
}
func (x AskPlan_Status) String() string {
return proto.EnumName(AskPlan_Status_name, int32(x))
}
func (AskPlan_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} }
type AskPlanCPU struct {
CorePercents uint64 `protobuf:"varint,1,opt,name=core_percents,json=corePercents" json:"core_percents,omitempty"`
}
func (m *AskPlanCPU) Reset() { *m = AskPlanCPU{} }
func (m *AskPlanCPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanCPU) ProtoMessage() {}
func (*AskPlanCPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *AskPlanCPU) GetCorePercents() uint64 {
if m != nil {
return m.CorePercents
}
return 0
}
type AskPlanGPU struct {
Indexes []uint64 `protobuf:"varint,1,rep,packed,name=indexes" json:"indexes,omitempty"`
Hashes []string `protobuf:"bytes,2,rep,name=hashes" json:"hashes,omitempty"`
}
func (m *AskPlanGPU) Reset() { *m = AskPlanGPU{} }
func (m *AskPlanGPU) String() string { return proto.CompactTextString(m) }
func (*AskPlanGPU) ProtoMessage() {}
func (*AskPlanGPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *AskPlanGPU) GetIndexes() []uint64 {
if m != nil {
return m.Indexes
}
return nil
}
func (m *AskPlanGPU) GetHashes() []string {
if m != nil {
return m.Hashes
}
return nil
}
type AskPlanRAM struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanRAM) Reset() { *m = AskPlanRAM{} }
func (m *AskPlanRAM) String() string { return proto.CompactTextString(m) }
func (*AskPlanRAM) ProtoMessage() {}
func (*AskPlanRAM) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *AskPlanRAM) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanStorage struct {
Size *DataSize `protobuf:"bytes,1,opt,name=size" json:"size,omitempty"`
}
func (m *AskPlanStorage) Reset() { *m = AskPlanStorage{} }
func (m *AskPlanStorage) String() string { return proto.CompactTextString(m) }
func (*AskPlanStorage) ProtoMessage() {}
func (*AskPlanStorage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *AskPlanStorage) GetSize() *DataSize {
if m != nil {
return m.Size
}
return nil
}
type AskPlanNetwork struct {
ThroughputIn *DataSizeRate `protobuf:"bytes,1,opt,name=throughputIn" json:"throughputIn,omitempty"`
ThroughputOut *DataSizeRate `protobuf:"bytes,2,opt,name=throughputOut" json:"throughputOut,omitempty"`
Overlay bool `protobuf:"varint,3,opt,name=overlay" json:"overlay,omitempty"`
Outbound bool `protobuf:"varint,4,opt,name=outbound" json:"outbound,omitempty"`
Incoming bool `protobuf:"varint,5,opt,name=incoming" json:"incoming,omitempty"`
}
func (m *AskPlanNetwork) Reset() { *m = AskPlanNetwork{} }
func (m *AskPlanNetwork) String() string { return proto.CompactTextString(m) }
func (*AskPlanNetwork) ProtoMessage() {}
func (*AskPlanNetwork) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *AskPlanNetwork) GetThroughputIn() *DataSizeRate {
if m != nil {
return m.ThroughputIn
}
return nil
}
func (m *AskPlanNetwork) GetThroughputOut() *DataSizeRate {
if m != nil {
return m.ThroughputOut
}
return nil
}
func (m *AskPlanNetwork) GetOverlay() bool {
if m != nil {
return m.Overlay
}
return false
}
func (m *AskPlanNetwork) GetOutbound() bool {
if m != nil {
return m.Outbound
}
return false
}
func (m *AskPlanNetwork) GetIncoming() bool {
if m != nil {
return m.Incoming
}
return false
}
type AskPlanResources struct {
CPU *AskPlanCPU `protobuf:"bytes,1,opt,name=CPU" json:"CPU,omitempty"`
RAM *AskPlanRAM `protobuf:"bytes,2,opt,name=RAM" json:"RAM,omitempty"`
Storage *AskPlanStorage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
GPU *AskPlanGPU `protobuf:"bytes,4,opt,name=GPU" json:"GPU,omitempty"`
Network *AskPlanNetwork `protobuf:"bytes,5,opt,name=network" json:"network,omitempty"`
}
func (m *AskPlanResources) Reset() { *m = AskPlanResources{} }
func (m *AskPlanResources) String() string { return proto.CompactTextString(m) }
func (*AskPlanResources) ProtoMessage() {}
func (*AskPlanResources) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *AskPlanResources) GetCPU() *AskPlanCPU {
if m != nil {
return m.CPU
}
return nil
}
func (m *AskPlanResources) GetRAM() *AskPlanRAM {
if m != nil {
return m.RAM
}
return nil
}
func (m *AskPlanResources) GetStorage() *AskPlanStorage {
if m != nil {
return m.Storage
}
return nil
}
func (m *AskPlanResources) GetGPU() *AskPlanGPU {
if m != nil {
return m.GPU
}
return nil
}
func (m *AskPlanResources) GetNetwork() *AskPlanNetwork {
if m != nil {
return m.Network
}
return nil
}
type AskPlan struct {
ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"`
OrderID *BigInt `protobuf:"bytes,2,opt,name=orderID" json:"orderID,omitempty"`
DealID *BigInt `protobuf:"bytes,3,opt,name=dealID" json:"dealID,omitempty"`
Duration *Duration `protobuf:"bytes,4,opt,name=duration" json:"duration,omitempty"`
Price *Price `protobuf:"bytes,5,opt,name=price" json:"price,omitempty"`
Blacklist *EthAddress `protobuf:"bytes,6,opt,name=blacklist" json:"blacklist,omitempty"`
Counterparty *EthAddress `protobuf:"bytes,7,opt,name=counterparty" json:"counterparty,omitempty"`
Identity IdentityLevel `protobuf:"varint,8,opt,name=identity,enum=sonm.IdentityLevel" json:"identity,omitempty"`
Tag []byte `protobuf:"bytes,9,opt,name=tag,proto3" json:"tag,omitempty"`
Resources *AskPlanResources `protobuf:"bytes,10,opt,name=resources" json:"resources,omitempty"`
Status AskPlan_Status `protobuf:"varint,11,opt,name=status,enum=sonm.AskPlan_Status" json:"status,omitempty"`
}
func (m *AskPlan) Reset() { *m = AskPlan{} }
func (m *AskPlan) String() string { return proto.CompactTextString(m) }
func (*AskPlan) ProtoMessage() {}
func (*AskPlan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *AskPlan) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *AskPlan) GetOrderID() *BigInt {
if m != nil {
return m.OrderID
}
return nil
}
func (m *AskPlan) GetDealID() *BigInt {
if m != nil {
return m.DealID
}
return nil
}
func (m *AskPlan) GetDuration() *Duration {
if m != nil {
return m.Duration
}
return nil
}
func (m *AskPlan) GetPrice() *Price {
if m != nil {
return m.Price
}
return nil
}
func (m *AskPlan) GetBlacklist() *EthAddress {
if m != nil {
return m.Blacklist
}
return nil
}
func (m *AskPlan) GetCounterparty() *EthAddress {
if m != nil {
return m.Counterparty
}
return nil
}
func (m *AskPlan) GetIdentity() IdentityLevel {
if m != nil |
return IdentityLevel_ANONYMOUS
}
func (m *AskPlan) GetTag() []byte {
if m != nil {
return m.Tag
}
return nil
}
func (m *AskPlan) GetResources() *AskPlanResources {
if m != nil {
return m.Resources
}
return nil
}
func (m *AskPlan) GetStatus() AskPlan_Status {
if m != nil {
return m.Status
}
return AskPlan_ACTIVE
}
func init() {
proto.RegisterType((*AskPlanCPU)(nil), "sonm.AskPlanCPU")
proto.RegisterType((*AskPlanGPU)(nil), "sonm.AskPlanGPU")
proto.RegisterType((*AskPlanRAM)(nil), "sonm.AskPlanRAM")
proto.RegisterType((*AskPlanStorage)(nil), "sonm.AskPlanStorage")
proto.RegisterType((*AskPlanNetwork)(nil), "sonm.AskPlanNetwork")
proto.RegisterType((*AskPlanResources)(nil), "sonm.AskPlanResources")
proto.RegisterType((*AskPlan)(nil), "sonm.AskPlan")
proto.RegisterEnum("sonm.AskPlan_Status", AskPlan_Status_name, AskPlan_Status_value)
}
func init() { proto.RegisterFile("ask_plan.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 632 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x6a, 0xdb, 0x30,
0x14, 0x9e, 0x93, 0x34, 0x3f, 0x27, 0x69, 0x96, 0x69, 0xa5, 0x98, 0x5e, 0x65, 0xde, 0x18, 0xa1,
0x8c, 0x6c, 0xeb, 0xca, 0xd8, 0xd5, 0x20, 0xab, 0x43, 0x30, 0xb4, 0xa9, 0x51, 0x9b, 0xdd, 0x16,
0xc5, 0x16, 0x89, 0x88, 0x2b, 0x19, 0x49, 0xee, 0xd6, 0x3e, 0xe7, 0xae, 0xf7, 0x0a, 0x7b, 0x85,
0x21, 0x5b, 0x4e, 0xe6, 0x42, 0x61, 0x77, 0x3e, 0xdf, 0xcf, 0xf9, 0xd3, 0xc1, 0xd0, 0x27, 0x6a,
0x73, 0x93, 0x26, 0x84, 0x8f, 0x53, 0x29, 0xb4, 0x40, 0x0d, 0x25, 0xf8, 0xed, 0x51, 0x6f, 0xc9,
0x56, 0x8c, 0xeb, 0x02, 0x3b, 0x7a, 0xce, 0xb8, 0x41, 0x39, 0x23, 0x16, 0x78, 0x71, 0x4b, 0xe4,
0x86, 0xea, 0x34, 0x21, 0x11, 0x2d, 0x20, 0xef, 0x23, 0xc0, 0x44, 0x6d, 0xc2, 0x84, 0xf0, 0xb3,
0x70, 0x81, 0x5e, 0xc3, 0x7e, 0x24, 0x24, 0xbd, 0x49, 0xa9, 0x8c, 0x28, 0xd7, 0xca, 0x75, 0x86,
0xce, 0xa8, 0x81, 0x7b, 0x06, 0x0c, 0x2d, 0xe6, 0x7d, 0xdd, 0x5a, 0x66, 0xe1, 0x02, 0xb9, 0xd0,
0x62, 0x3c, 0xa6, 0x3f, 0xa9, 0x11, 0xd7, 0x47, 0x0d, 0x5c, 0x86, 0xe8, 0x10, 0x9a, 0x6b, 0xa2,
0xd6, 0x54, 0xb9, 0xb5, 0x61, 0x7d, 0xd4, 0xc1, 0x36, 0xf2, 0x3e, 0x6c, 0xfd, 0x78, 0x72, 0x81,
0x3c, 0x68, 0x28, 0xf6, 0x40, 0xf3, 0x4a, 0xdd, 0x93, 0xfe, 0xd8, 0x74, 0x3c, 0xf6, 0x89, 0x26,
0x57, 0xec, 0x81, 0xe2, 0x9c, 0xf3, 0x4e, 0xa1, 0x6f, 0x1d, 0x57, 0x5a, 0x48, 0xb2, 0xa2, 0xff,
0xe5, 0xfa, 0xe5, 0x6c, 0x6d, 0x73, 0xaa, 0x7f, 0x08, 0xb9, 0x41, 0x9f, 0xa1, 0xa7, 0xd7, 0x52,
0x64, 0xab, 0x75, 0x9a, 0xe9, 0x80, 0x5b, 0x3b, 0x7a, 0x64, 0x27, 0x9a, 0xe2, 0x8a, 0x0e, 0x7d,
0x81, 0xfd, 0x5d, 0x7c, 0x99, 0x69, 0xb7, 0xf6, 0xa4, 0xb1, 0x2a, 0x34, 0xeb, 0x11, 0x77, 0x54,
0x26, 0xe4, 0xde, 0xad, 0x0f, 0x9d, 0x51, 0x1b, 0x97, 0x21, 0x3a, 0x82, 0xb6, 0xc8, 0xf4, 0x52,
0x64, 0x3c, 0x76, 0x1b, 0x39, 0xb5, 0x8d, 0x0d, 0xc7, 0x78, 0x24, 0x6e, 0x19, 0x5f, 0xb9, 0x7b,
0x05, 0x57, 0xc6, 0xde, 0x6f, 0x07, 0x06, 0xe5, 0xfe, 0xa8, 0x12, 0x99, 0x8c, 0xa8, 0x42, 0x1e,
0xd4, 0xcf, 0xc2, 0x85, 0x9d, 0x67, 0x50, 0xb4, 0xb5, 0x7b, 0x57, 0x6c, 0x48, 0xa3, 0xc1, 0x93,
0x0b, 0xdb, 0x7a, 0x55, 0x83, 0x27, 0x17, 0xd8, 0x90, 0x68, 0x0c, 0x2d, 0x55, 0xac, 0x38, 0x6f,
0xb7, 0x7b, 0x72, 0x50, 0xd1, 0xd9, 0xf5, 0xe3, 0x52, 0x64, 0x72, 0xce, 0xc2, 0x45, 0xde, 0xff,
0xe3, 0x9c, 0x33, 0x53, 0xd7, 0x5c, 0xc8, 0x18, 0x5a, 0xbc, 0xd8, 0x7f, 0x3e, 0xcb, 0xe3, 0x9c,
0xf6, 0x6d, 0x70, 0x29, 0xf2, 0xfe, 0xd4, 0xa1, 0x65, 0x39, 0xd4, 0x87, 0x5a, 0xe0, 0xe7, 0x63,
0x75, 0x70, 0x2d, 0xf0, 0xd1, 0x5b, 0x68, 0x09, 0x19, 0x53, 0x19, 0xf8, 0x76, 0x8e, 0x5e, 0x91,
0xeb, 0x1b, 0x5b, 0x05, 0x5c, 0xe3, 0x92, 0x44, 0x6f, 0xa0, 0x19, 0x53, 0x92, 0x04, 0xbe, 0x1d,
0xa3, 0x2a, 0xb3, 0x1c, 0x3a, 0x86, 0x76, 0x9c, 0x49, 0xa2, 0x99, 0xe0, 0x76, 0x84, 0xf2, 0x92,
0x2c, 0x8a, 0xb7, 0x3c, 0x7a, 0x05, 0x7b, 0xa9, 0x64, 0x11, 0xb5, 0x33, 0x74, 0x0b, 0x61, 0x68,
0x20, 0x5c, 0x30, 0x68, 0x0c, 0x9d, 0x65, 0x42, 0xa2, 0x4d, 0xc2, 0x94, 0x76, 0x9b, 0xff, 0xae,
0x64, 0xaa, 0xd7, 0x93, 0x38, 0x96, 0x54, 0x29, 0xbc, 0x93, 0xa0, 0x53, 0xe8, 0x45, 0x22, 0xe3,
0x9a, 0xca, 0x94, 0x48, 0x7d, 0xef, 0xb6, 0x9e, 0xb0, 0x54, 0x54, 0xe8, 0x3d, 0xb4, 0x59, 0x4c,
0xb9, 0x66, 0xfa, 0xde, 0x6d, 0x0f, 0x9d, 0x51, 0xff, 0xe4, 0x65, 0xe1, 0x08, 0x2c, 0x7a, 0x4e,
0xef, 0x68, 0x82, 0xb7, 0x22, 0x34, 0x80, 0xba, 0x26, 0x2b, 0xb7, 0x33, 0x74, 0x46, 0x3d, 0x6c,
0x3e, 0xd1, 0x29, 0x74, 0x64, 0x79, 0x3a, 0x2e, 0xe4, 0x55, 0x0f, 0xab, 0xf7, 0x50, 0xb2, 0x78,
0x27, 0x44, 0xef, 0xa0, 0xa9, 0x34, 0xd1, 0x99, 0x72, 0xbb, 0x79, 0xd9, 0xea, 0x33, 0x8e, 0xaf,
0x72, 0x0e, 0x5b, 0x8d, 0x77, 0x0c, 0xcd, 0x02, 0x41, 0x00, 0xcd, 0xc9, 0xd9, 0x75, 0xf0, 0x7d,
0x3a, 0x78, 0x86, 0x0e, 0x60, 0x10, 0x4e, 0xe7, 0x7e, 0x30, 0x9f, 0xdd, 0xf8, 0xd3, 0xf3, 0xe9,
0x75, 0x70, 0x39, 0x1f, 0x38, 0xcb, 0x66, 0xfe, 0x2f, 0xfa, 0xf4, 0x37, 0x00, 0x00, 0xff, 0xff,
0xbd, 0xd9, 0x4e, 0xf0, 0xd5, 0x04, 0x00, 0x00,
}
| {
return m.Identity
} | conditional_block |
log.py | #
# Copyright (c) 2018 Two Sigma Open Source, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
''':mod:`~marbles.core.marbles` can log information about each
assertion called.
If configured, the :data:`marbles.core.log.logger` will log a json
object for each assertion and its success or failure, as well as any
other attributes of interest specified by the test author.
The captured information includes the assertion's args and kwargs,
msg, note, local variables (for failed assertions, and also for
passing assertions if verbose logging is turned on), and the result of
the assertion.
Configuration is handled via the environment variables
:envvar:`MARBLES_LOGFILE`, :envvar:`MARBLES_TEST_CASE_ATTRS`,
:envvar:`MARBLES_TEST_CASE_ATTRS_VERBOSE`,
:envvar:`MARBLES_LOG_VERBOSE`, or via the
:meth:`AssertionLogger.configure` method. Environment variables
override those set with the :meth:`~AssertionLogger.configure` method,
so if a :mod:`marbles` program configures these programmatically, they
can always be overridden without changing the program.
Note that :class:`AssertionLogger` should not be instantiated
directly; instead, test authors should import and configure the
:data:`marbles.core.log.logger` as needed.
'''
import datetime
import inspect
import json
import os
from . import _stack
from . import __version__
# XXX(leif): I don't think it's worth the gymnastics it would take to
# test this function. We do test whether mixins are
# identified but catching all the cases here would be a
# lot.
def _class_defining_method(meth): # pragma: no cover
'''Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
'''
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return '{}.{}'.format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split('.<locals>', 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit('.', 1)[0])
if isinstance(cls, type):
return '{}.{}'.format(cls.__module__, cls.__name__)
class AssertionLogger(object):
'''The :class:`AssertionLogger` logs json about each assertion.
This module exposes a single :class:`AssertionLogger`,
:data:`marbles.core.log.logger`, that is used during a marbles test
run. It can be configured with :meth:`configure` before running
the tests or via environment variables.
Example:
.. code-block:: py
import marbles.core
from marbles.core import log
if __name__ == '__main__':
log.logger.configure(logfile='/path/to/marbles.log',
attrs=['filename', 'date'])
marbles.core.main()
.. note::
If you configure logging within an ``if __name__ == '__main__'``,
block (as opposed to via environment variables), you must run
your tests with ``python /path/to/tests.py``. If you run your
tests with ``python -m marbles``, the
``if __name__ == '__main__'`` block won't get executed and the
logger won't get configured.
'''
def | (self):
self._logfile = None
self._logfilename = None
self._verbose = False
self._attrs = None
self._verbose_attrs = None
@staticmethod
def _open_if_needed(filename):
if isinstance(filename, (str, bytes)):
return open(filename, 'w')
else:
# Assume is already file-like
return filename
def configure(self, **kwargs):
'''Configure what assertion logging is done.
Settings configured with this method are overridden by
environment variables.
Parameters
----------
logfile : str or bytes or file object
If a string or bytes object, we write to that filename.
If an open file object, we just write to it. If None,
disable logging. If we open the file, we open it in
``'w'`` mode, so any contents will be overwritten.
attrs : list of str
Capture these attributes on the TestCase being run when
logging an assertion. For example, if you are testing
multiple resources, make sure the resource name is a
member of your TestCase, and configure marbles logging
with that name. These are only captured on failure.
verbose_attrs : list of str
Similar to attrs, but these attrs are captured even on
success.
verbose : bool or list of str
Fields (within the set {msg, note, locals}) to capture
even when the test is successful. By default, those three
fields are only captured on failure.
'''
if 'logfile' in kwargs:
# Note that kwargs['logfile'] might be an open file
# object, not a string. We deal with this in
# _open_if_needed, but refactoring it so that in that case
# it gets set on another attribute would be tricky to
# handle the lazy opening semantics that let us override
# it with MARBLES_LOGFILE, so instead we choose to let
# self._logfilename do double-duty: sometimes it's a name,
# sometimes it's sneakily a file object.
self._logfilename = kwargs['logfile']
if 'attrs' in kwargs:
self._attrs = kwargs['attrs']
if 'verbose_attrs' in kwargs:
self._verbose_attrs = kwargs['verbose_attrs']
if 'verbose' in kwargs:
self._verbose = kwargs['verbose']
@property
def log_enabled(self):
return self.logfile is not None
@property
def logfile(self):
if self._logfile:
return self._logfile
if self.logfilename:
self._logfile = self._open_if_needed(self.logfilename)
return self._logfile
@property
def logfilename(self):
return os.environ.get('MARBLES_LOGFILE', self._logfilename)
@property
def attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS'].split(',')
except KeyError:
return self._attrs or ()
@property
def verbose_attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS_VERBOSE'].split(',')
except KeyError:
return self._verbose_attrs or ()
@property
def verbose(self):
verbose = os.environ.get('MARBLES_LOG_VERBOSE', self._verbose)
verbose_attrs = ('msg', 'note', 'locals')
if isinstance(verbose, str):
if verbose.lower() == 'false':
return ()
elif verbose.lower() == 'true':
return verbose_attrs
else:
return verbose.split(',')
elif verbose is True:
return verbose_attrs
else:
return verbose or ()
def _log_assertion(self, case, assertion, args, kwargs, msg, note,
*exc_info):
if not self.log_enabled:
return
now = datetime.datetime.now()
locals_, module, filename, lineno = _stack.get_stack_info()
passed = exc_info[0] is None
doc = {
'case': str(case),
'test_case': case.__class__.__name__,
'test_method': case._testMethodName,
'module': module,
'file': filename,
'line': lineno,
'assertion': assertion.__name__,
'args': [str(a) for a in args],
'kwargs': [{'key': k, 'value': str(v)} for k, v in kwargs.items()],
'assertion_class': _class_defining_method(assertion),
'marbles_version': __version__,
'@timestamp': now.strftime('%Y-%m-%dT%H:%M:%S.%f')
}
verbose_elements = {
'msg': msg,
'note': note.format(**locals_) if note else None,
'locals': [{'key': k, 'value': str(v)} for k, v in locals_.items()
if (k not in ('msg', 'note', 'self')
and not k.startswith('_'))]
}
if not passed:
doc.update(verbose_elements)
elif self.verbose:
doc.update({k: v for k, v in verbose_elements.items()
if k in self.verbose})
doc.update({attr: str(getattr(case, attr, None))
for attr in self.verbose_attrs})
if not passed:
doc.update({attr: str(getattr(case, attr, None))
for attr in self.attrs})
if passed:
doc['result'] = 'pass'
else:
doc['result'] = 'fail'
json.dump(doc, self.logfile)
self.logfile.write('\n')
logger = AssertionLogger()
| __init__ | identifier_name |
log.py | #
# Copyright (c) 2018 Two Sigma Open Source, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
''':mod:`~marbles.core.marbles` can log information about each
assertion called.
If configured, the :data:`marbles.core.log.logger` will log a json
object for each assertion and its success or failure, as well as any
other attributes of interest specified by the test author.
The captured information includes the assertion's args and kwargs,
msg, note, local variables (for failed assertions, and also for
passing assertions if verbose logging is turned on), and the result of
the assertion.
Configuration is handled via the environment variables
:envvar:`MARBLES_LOGFILE`, :envvar:`MARBLES_TEST_CASE_ATTRS`,
:envvar:`MARBLES_TEST_CASE_ATTRS_VERBOSE`,
:envvar:`MARBLES_LOG_VERBOSE`, or via the
:meth:`AssertionLogger.configure` method. Environment variables
override those set with the :meth:`~AssertionLogger.configure` method,
so if a :mod:`marbles` program configures these programmatically, they
can always be overridden without changing the program.
Note that :class:`AssertionLogger` should not be instantiated
directly; instead, test authors should import and configure the
:data:`marbles.core.log.logger` as needed.
'''
import datetime
import inspect
import json
import os
from . import _stack
from . import __version__
# XXX(leif): I don't think it's worth the gymnastics it would take to
# test this function. We do test whether mixins are
# identified but catching all the cases here would be a
# lot.
def _class_defining_method(meth): # pragma: no cover
'''Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
'''
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return '{}.{}'.format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split('.<locals>', 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit('.', 1)[0])
if isinstance(cls, type):
return '{}.{}'.format(cls.__module__, cls.__name__)
class AssertionLogger(object):
'''The :class:`AssertionLogger` logs json about each assertion.
This module exposes a single :class:`AssertionLogger`,
:data:`marbles.core.log.logger`, that is used during a marbles test
run. It can be configured with :meth:`configure` before running
the tests or via environment variables.
Example:
.. code-block:: py
import marbles.core
from marbles.core import log
if __name__ == '__main__':
log.logger.configure(logfile='/path/to/marbles.log',
attrs=['filename', 'date'])
marbles.core.main()
.. note::
If you configure logging within an ``if __name__ == '__main__'``,
block (as opposed to via environment variables), you must run
your tests with ``python /path/to/tests.py``. If you run your
tests with ``python -m marbles``, the
``if __name__ == '__main__'`` block won't get executed and the
logger won't get configured.
'''
def __init__(self):
self._logfile = None
self._logfilename = None
self._verbose = False
self._attrs = None
self._verbose_attrs = None
@staticmethod
def _open_if_needed(filename):
if isinstance(filename, (str, bytes)):
return open(filename, 'w')
else:
# Assume is already file-like
return filename
def configure(self, **kwargs):
'''Configure what assertion logging is done.
Settings configured with this method are overridden by
environment variables.
Parameters
----------
logfile : str or bytes or file object
If a string or bytes object, we write to that filename.
If an open file object, we just write to it. If None,
disable logging. If we open the file, we open it in
``'w'`` mode, so any contents will be overwritten.
attrs : list of str
Capture these attributes on the TestCase being run when
logging an assertion. For example, if you are testing
multiple resources, make sure the resource name is a
member of your TestCase, and configure marbles logging
with that name. These are only captured on failure.
verbose_attrs : list of str
Similar to attrs, but these attrs are captured even on
success.
verbose : bool or list of str
Fields (within the set {msg, note, locals}) to capture
even when the test is successful. By default, those three
fields are only captured on failure.
'''
if 'logfile' in kwargs:
# Note that kwargs['logfile'] might be an open file
# object, not a string. We deal with this in
# _open_if_needed, but refactoring it so that in that case
# it gets set on another attribute would be tricky to
# handle the lazy opening semantics that let us override
# it with MARBLES_LOGFILE, so instead we choose to let
# self._logfilename do double-duty: sometimes it's a name,
# sometimes it's sneakily a file object.
self._logfilename = kwargs['logfile']
if 'attrs' in kwargs:
self._attrs = kwargs['attrs']
if 'verbose_attrs' in kwargs:
self._verbose_attrs = kwargs['verbose_attrs']
if 'verbose' in kwargs:
self._verbose = kwargs['verbose']
@property
def log_enabled(self):
return self.logfile is not None
@property
def logfile(self):
|
@property
def logfilename(self):
return os.environ.get('MARBLES_LOGFILE', self._logfilename)
@property
def attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS'].split(',')
except KeyError:
return self._attrs or ()
@property
def verbose_attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS_VERBOSE'].split(',')
except KeyError:
return self._verbose_attrs or ()
@property
def verbose(self):
verbose = os.environ.get('MARBLES_LOG_VERBOSE', self._verbose)
verbose_attrs = ('msg', 'note', 'locals')
if isinstance(verbose, str):
if verbose.lower() == 'false':
return ()
elif verbose.lower() == 'true':
return verbose_attrs
else:
return verbose.split(',')
elif verbose is True:
return verbose_attrs
else:
return verbose or ()
def _log_assertion(self, case, assertion, args, kwargs, msg, note,
*exc_info):
if not self.log_enabled:
return
now = datetime.datetime.now()
locals_, module, filename, lineno = _stack.get_stack_info()
passed = exc_info[0] is None
doc = {
'case': str(case),
'test_case': case.__class__.__name__,
'test_method': case._testMethodName,
'module': module,
'file': filename,
'line': lineno,
'assertion': assertion.__name__,
'args': [str(a) for a in args],
'kwargs': [{'key': k, 'value': str(v)} for k, v in kwargs.items()],
'assertion_class': _class_defining_method(assertion),
'marbles_version': __version__,
'@timestamp': now.strftime('%Y-%m-%dT%H:%M:%S.%f')
}
verbose_elements = {
'msg': msg,
'note': note.format(**locals_) if note else None,
'locals': [{'key': k, 'value': str(v)} for k, v in locals_.items()
if (k not in ('msg', 'note', 'self')
and not k.startswith('_'))]
}
if not passed:
doc.update(verbose_elements)
elif self.verbose:
doc.update({k: v for k, v in verbose_elements.items()
if k in self.verbose})
doc.update({attr: str(getattr(case, attr, None))
for attr in self.verbose_attrs})
if not passed:
doc.update({attr: str(getattr(case, attr, None))
for attr in self.attrs})
if passed:
doc['result'] = 'pass'
else:
doc['result'] = 'fail'
json.dump(doc, self.logfile)
self.logfile.write('\n')
logger = AssertionLogger()
| if self._logfile:
return self._logfile
if self.logfilename:
self._logfile = self._open_if_needed(self.logfilename)
return self._logfile | identifier_body |
log.py | #
# Copyright (c) 2018 Two Sigma Open Source, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
''':mod:`~marbles.core.marbles` can log information about each | other attributes of interest specified by the test author.
The captured information includes the assertion's args and kwargs,
msg, note, local variables (for failed assertions, and also for
passing assertions if verbose logging is turned on), and the result of
the assertion.
Configuration is handled via the environment variables
:envvar:`MARBLES_LOGFILE`, :envvar:`MARBLES_TEST_CASE_ATTRS`,
:envvar:`MARBLES_TEST_CASE_ATTRS_VERBOSE`,
:envvar:`MARBLES_LOG_VERBOSE`, or via the
:meth:`AssertionLogger.configure` method. Environment variables
override those set with the :meth:`~AssertionLogger.configure` method,
so if a :mod:`marbles` program configures these programmatically, they
can always be overridden without changing the program.
Note that :class:`AssertionLogger` should not be instantiated
directly; instead, test authors should import and configure the
:data:`marbles.core.log.logger` as needed.
'''
import datetime
import inspect
import json
import os
from . import _stack
from . import __version__
# XXX(leif): I don't think it's worth the gymnastics it would take to
# test this function. We do test whether mixins are
# identified but catching all the cases here would be a
# lot.
def _class_defining_method(meth): # pragma: no cover
'''Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
'''
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return '{}.{}'.format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split('.<locals>', 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit('.', 1)[0])
if isinstance(cls, type):
return '{}.{}'.format(cls.__module__, cls.__name__)
class AssertionLogger(object):
'''The :class:`AssertionLogger` logs json about each assertion.
This module exposes a single :class:`AssertionLogger`,
:data:`marbles.core.log.logger`, that is used during a marbles test
run. It can be configured with :meth:`configure` before running
the tests or via environment variables.
Example:
.. code-block:: py
import marbles.core
from marbles.core import log
if __name__ == '__main__':
log.logger.configure(logfile='/path/to/marbles.log',
attrs=['filename', 'date'])
marbles.core.main()
.. note::
If you configure logging within an ``if __name__ == '__main__'``,
block (as opposed to via environment variables), you must run
your tests with ``python /path/to/tests.py``. If you run your
tests with ``python -m marbles``, the
``if __name__ == '__main__'`` block won't get executed and the
logger won't get configured.
'''
def __init__(self):
self._logfile = None
self._logfilename = None
self._verbose = False
self._attrs = None
self._verbose_attrs = None
@staticmethod
def _open_if_needed(filename):
if isinstance(filename, (str, bytes)):
return open(filename, 'w')
else:
# Assume is already file-like
return filename
def configure(self, **kwargs):
'''Configure what assertion logging is done.
Settings configured with this method are overridden by
environment variables.
Parameters
----------
logfile : str or bytes or file object
If a string or bytes object, we write to that filename.
If an open file object, we just write to it. If None,
disable logging. If we open the file, we open it in
``'w'`` mode, so any contents will be overwritten.
attrs : list of str
Capture these attributes on the TestCase being run when
logging an assertion. For example, if you are testing
multiple resources, make sure the resource name is a
member of your TestCase, and configure marbles logging
with that name. These are only captured on failure.
verbose_attrs : list of str
Similar to attrs, but these attrs are captured even on
success.
verbose : bool or list of str
Fields (within the set {msg, note, locals}) to capture
even when the test is successful. By default, those three
fields are only captured on failure.
'''
if 'logfile' in kwargs:
# Note that kwargs['logfile'] might be an open file
# object, not a string. We deal with this in
# _open_if_needed, but refactoring it so that in that case
# it gets set on another attribute would be tricky to
# handle the lazy opening semantics that let us override
# it with MARBLES_LOGFILE, so instead we choose to let
# self._logfilename do double-duty: sometimes it's a name,
# sometimes it's sneakily a file object.
self._logfilename = kwargs['logfile']
if 'attrs' in kwargs:
self._attrs = kwargs['attrs']
if 'verbose_attrs' in kwargs:
self._verbose_attrs = kwargs['verbose_attrs']
if 'verbose' in kwargs:
self._verbose = kwargs['verbose']
@property
def log_enabled(self):
return self.logfile is not None
@property
def logfile(self):
if self._logfile:
return self._logfile
if self.logfilename:
self._logfile = self._open_if_needed(self.logfilename)
return self._logfile
@property
def logfilename(self):
return os.environ.get('MARBLES_LOGFILE', self._logfilename)
@property
def attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS'].split(',')
except KeyError:
return self._attrs or ()
@property
def verbose_attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS_VERBOSE'].split(',')
except KeyError:
return self._verbose_attrs or ()
@property
def verbose(self):
verbose = os.environ.get('MARBLES_LOG_VERBOSE', self._verbose)
verbose_attrs = ('msg', 'note', 'locals')
if isinstance(verbose, str):
if verbose.lower() == 'false':
return ()
elif verbose.lower() == 'true':
return verbose_attrs
else:
return verbose.split(',')
elif verbose is True:
return verbose_attrs
else:
return verbose or ()
def _log_assertion(self, case, assertion, args, kwargs, msg, note,
*exc_info):
if not self.log_enabled:
return
now = datetime.datetime.now()
locals_, module, filename, lineno = _stack.get_stack_info()
passed = exc_info[0] is None
doc = {
'case': str(case),
'test_case': case.__class__.__name__,
'test_method': case._testMethodName,
'module': module,
'file': filename,
'line': lineno,
'assertion': assertion.__name__,
'args': [str(a) for a in args],
'kwargs': [{'key': k, 'value': str(v)} for k, v in kwargs.items()],
'assertion_class': _class_defining_method(assertion),
'marbles_version': __version__,
'@timestamp': now.strftime('%Y-%m-%dT%H:%M:%S.%f')
}
verbose_elements = {
'msg': msg,
'note': note.format(**locals_) if note else None,
'locals': [{'key': k, 'value': str(v)} for k, v in locals_.items()
if (k not in ('msg', 'note', 'self')
and not k.startswith('_'))]
}
if not passed:
doc.update(verbose_elements)
elif self.verbose:
doc.update({k: v for k, v in verbose_elements.items()
if k in self.verbose})
doc.update({attr: str(getattr(case, attr, None))
for attr in self.verbose_attrs})
if not passed:
doc.update({attr: str(getattr(case, attr, None))
for attr in self.attrs})
if passed:
doc['result'] = 'pass'
else:
doc['result'] = 'fail'
json.dump(doc, self.logfile)
self.logfile.write('\n')
logger = AssertionLogger() | assertion called.
If configured, the :data:`marbles.core.log.logger` will log a json
object for each assertion and its success or failure, as well as any | random_line_split |
log.py | #
# Copyright (c) 2018 Two Sigma Open Source, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
''':mod:`~marbles.core.marbles` can log information about each
assertion called.
If configured, the :data:`marbles.core.log.logger` will log a json
object for each assertion and its success or failure, as well as any
other attributes of interest specified by the test author.
The captured information includes the assertion's args and kwargs,
msg, note, local variables (for failed assertions, and also for
passing assertions if verbose logging is turned on), and the result of
the assertion.
Configuration is handled via the environment variables
:envvar:`MARBLES_LOGFILE`, :envvar:`MARBLES_TEST_CASE_ATTRS`,
:envvar:`MARBLES_TEST_CASE_ATTRS_VERBOSE`,
:envvar:`MARBLES_LOG_VERBOSE`, or via the
:meth:`AssertionLogger.configure` method. Environment variables
override those set with the :meth:`~AssertionLogger.configure` method,
so if a :mod:`marbles` program configures these programmatically, they
can always be overridden without changing the program.
Note that :class:`AssertionLogger` should not be instantiated
directly; instead, test authors should import and configure the
:data:`marbles.core.log.logger` as needed.
'''
import datetime
import inspect
import json
import os
from . import _stack
from . import __version__
# XXX(leif): I don't think it's worth the gymnastics it would take to
# test this function. We do test whether mixins are
# identified but catching all the cases here would be a
# lot.
def _class_defining_method(meth): # pragma: no cover
'''Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
'''
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return '{}.{}'.format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split('.<locals>', 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit('.', 1)[0])
if isinstance(cls, type):
return '{}.{}'.format(cls.__module__, cls.__name__)
class AssertionLogger(object):
'''The :class:`AssertionLogger` logs json about each assertion.
This module exposes a single :class:`AssertionLogger`,
:data:`marbles.core.log.logger`, that is used during a marbles test
run. It can be configured with :meth:`configure` before running
the tests or via environment variables.
Example:
.. code-block:: py
import marbles.core
from marbles.core import log
if __name__ == '__main__':
log.logger.configure(logfile='/path/to/marbles.log',
attrs=['filename', 'date'])
marbles.core.main()
.. note::
If you configure logging within an ``if __name__ == '__main__'``,
block (as opposed to via environment variables), you must run
your tests with ``python /path/to/tests.py``. If you run your
tests with ``python -m marbles``, the
``if __name__ == '__main__'`` block won't get executed and the
logger won't get configured.
'''
def __init__(self):
self._logfile = None
self._logfilename = None
self._verbose = False
self._attrs = None
self._verbose_attrs = None
@staticmethod
def _open_if_needed(filename):
if isinstance(filename, (str, bytes)):
return open(filename, 'w')
else:
# Assume is already file-like
return filename
def configure(self, **kwargs):
'''Configure what assertion logging is done.
Settings configured with this method are overridden by
environment variables.
Parameters
----------
logfile : str or bytes or file object
If a string or bytes object, we write to that filename.
If an open file object, we just write to it. If None,
disable logging. If we open the file, we open it in
``'w'`` mode, so any contents will be overwritten.
attrs : list of str
Capture these attributes on the TestCase being run when
logging an assertion. For example, if you are testing
multiple resources, make sure the resource name is a
member of your TestCase, and configure marbles logging
with that name. These are only captured on failure.
verbose_attrs : list of str
Similar to attrs, but these attrs are captured even on
success.
verbose : bool or list of str
Fields (within the set {msg, note, locals}) to capture
even when the test is successful. By default, those three
fields are only captured on failure.
'''
if 'logfile' in kwargs:
# Note that kwargs['logfile'] might be an open file
# object, not a string. We deal with this in
# _open_if_needed, but refactoring it so that in that case
# it gets set on another attribute would be tricky to
# handle the lazy opening semantics that let us override
# it with MARBLES_LOGFILE, so instead we choose to let
# self._logfilename do double-duty: sometimes it's a name,
# sometimes it's sneakily a file object.
self._logfilename = kwargs['logfile']
if 'attrs' in kwargs:
self._attrs = kwargs['attrs']
if 'verbose_attrs' in kwargs:
self._verbose_attrs = kwargs['verbose_attrs']
if 'verbose' in kwargs:
self._verbose = kwargs['verbose']
@property
def log_enabled(self):
return self.logfile is not None
@property
def logfile(self):
if self._logfile:
return self._logfile
if self.logfilename:
self._logfile = self._open_if_needed(self.logfilename)
return self._logfile
@property
def logfilename(self):
return os.environ.get('MARBLES_LOGFILE', self._logfilename)
@property
def attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS'].split(',')
except KeyError:
return self._attrs or ()
@property
def verbose_attrs(self):
try:
return os.environ['MARBLES_TEST_CASE_ATTRS_VERBOSE'].split(',')
except KeyError:
return self._verbose_attrs or ()
@property
def verbose(self):
verbose = os.environ.get('MARBLES_LOG_VERBOSE', self._verbose)
verbose_attrs = ('msg', 'note', 'locals')
if isinstance(verbose, str):
if verbose.lower() == 'false':
return ()
elif verbose.lower() == 'true':
return verbose_attrs
else:
return verbose.split(',')
elif verbose is True:
|
else:
return verbose or ()
def _log_assertion(self, case, assertion, args, kwargs, msg, note,
*exc_info):
if not self.log_enabled:
return
now = datetime.datetime.now()
locals_, module, filename, lineno = _stack.get_stack_info()
passed = exc_info[0] is None
doc = {
'case': str(case),
'test_case': case.__class__.__name__,
'test_method': case._testMethodName,
'module': module,
'file': filename,
'line': lineno,
'assertion': assertion.__name__,
'args': [str(a) for a in args],
'kwargs': [{'key': k, 'value': str(v)} for k, v in kwargs.items()],
'assertion_class': _class_defining_method(assertion),
'marbles_version': __version__,
'@timestamp': now.strftime('%Y-%m-%dT%H:%M:%S.%f')
}
verbose_elements = {
'msg': msg,
'note': note.format(**locals_) if note else None,
'locals': [{'key': k, 'value': str(v)} for k, v in locals_.items()
if (k not in ('msg', 'note', 'self')
and not k.startswith('_'))]
}
if not passed:
doc.update(verbose_elements)
elif self.verbose:
doc.update({k: v for k, v in verbose_elements.items()
if k in self.verbose})
doc.update({attr: str(getattr(case, attr, None))
for attr in self.verbose_attrs})
if not passed:
doc.update({attr: str(getattr(case, attr, None))
for attr in self.attrs})
if passed:
doc['result'] = 'pass'
else:
doc['result'] = 'fail'
json.dump(doc, self.logfile)
self.logfile.write('\n')
logger = AssertionLogger()
| return verbose_attrs | conditional_block |
fs.rs | use rlua::prelude::*;
use std::{
sync::Arc,
env,
fs::{self, OpenOptions},
io::{self, SeekFrom, prelude::*},
path::Path
};
use serde_json;
use rlua_serde;
use crate::bindings::system::LuaMetadata;
use regex::Regex;
//TODO: Move to having a common interface so IO can share the same binding
pub struct LuaFile(fs::File);
pub fn fs_open(_: &Lua, (path, mode): (String, Option<String>)) -> Result<LuaFile, LuaError> {
let mut option = OpenOptions::new();
if let Some(mode) = mode {
match mode.as_ref() {
"r" => option.read(true).write(false),
"w" => option.create(true).read(false).write(true),
"w+" => option.create(true).read(true).write(true).truncate(true),
"a" => option.append(true),
"rw" | _ => option.create(true).read(true).write(true),
};
} else {
option.create(true).read(true).write(true);
}
option.open(path)
.map(LuaFile)
.map_err(LuaError::external)
}
impl LuaUserData for LuaFile {
fn add_methods<'lua, M: LuaUserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method_mut("read", |_, this: &mut LuaFile, len: Option<usize>|{
let bytes = match len {
Some(len) => {
let mut bytes = vec![0u8; len];
this.0.read(&mut bytes).map_err(LuaError::external)?;
bytes
},
None => {
let mut bytes = vec![];
this.0.read_to_end(&mut bytes).map_err(LuaError::external)?;
bytes
}
};
Ok(bytes)
});
methods.add_method_mut("read_to_string", |_, this: &mut LuaFile, _: ()|{
let mut data = String::new();
this.0.read_to_string(&mut data).map_err(LuaError::external)?;
Ok(data)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, bytes: Vec<u8>|{
Ok(this.0.write(bytes.as_slice()).map_err(LuaError::external)?)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, str: String|{
Ok(this.0.write(str.as_bytes()).map_err(LuaError::external)?)
});
methods.add_method_mut("flush", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.flush().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_all", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_all().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_data", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_data().map_err(LuaError::external)?)
});
methods.add_method("metadata", |_, this: &LuaFile, _: ()| {
Ok(LuaMetadata(this.0.metadata().map_err(LuaError::external)?))
});
methods.add_method_mut("seek", |_, this: &mut LuaFile, (pos, size): (Option<String>, Option<usize>)| {
let size = size.unwrap_or(0);
let seekfrom = pos.and_then(|s_pos| {
Some(match s_pos.as_ref() {
"start" => SeekFrom::Start(size as u64),
"end" => SeekFrom::End(size as i64),
"current" | _ => SeekFrom::Current(size as i64),
})
}).unwrap_or(SeekFrom::Current(size as i64));
Ok(this.0.seek(seekfrom).map_err(LuaError::external)?)
});
}
}
pub fn init(lua: &Lua) -> crate::Result<()> {
let module = lua.create_table()?;
module.set("open", lua.create_function( fs_open)? )?;
module.set("canonicalize", lua.create_function( |lua, path: String| {
match fs::canonicalize(path).map_err(|err| LuaError::external(err)) {
Ok(i) => Ok(Some(lua.create_string(&i.to_str().unwrap()).unwrap())),
_ => Ok(None)
}
})? )?;
//Deprecated for path:create_dir
module.set("create_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
let result = match all {
Some(true) => fs::create_dir_all(path),
_ => fs::create_dir(path)
};
Ok(result.is_ok())
})? )?;
//Deprecated for path:read_dir
module.set("entries", lua.create_function( |lua, path: String| {
match fs::read_dir(path) {
Ok(iter) => {
let mut arc_iter = Arc::new(Some(iter));
let f = move |_, _: ()| {
let result = match Arc::get_mut(&mut arc_iter).expect("entries iterator is mutably borrowed") {
Some(iter) => match iter.next() {
Some(Ok(entry)) => Some(entry.file_name().into_string().unwrap()),
_ => None
},
None => None
};
if result.is_none() { *Arc::get_mut(&mut arc_iter).unwrap() = None; }
Ok(result)
};
Ok(lua.create_function_mut(f)?)
}, Err(err) => Err(LuaError::ExternalError(Arc::new(::failure::Error::from_boxed_compat(Box::new(err)))))
}
})? )?;
module.set("read_dir", lua.create_function( |lua, path: String| {
let mut _list: Vec<String> = Vec::new();
for entry in fs::read_dir(path).map_err(|err| LuaError::external(err))? {
let entry = entry.map_err(|err| LuaError::external(err))?;
_list.push(entry.path().file_name().unwrap_or_default().to_string_lossy().to_string());
}
let list_value: serde_json::Value = serde_json::to_value(_list).map_err(|err| LuaError::external(err) )?;
let lua_value = rlua_serde::to_value(lua, &list_value)?;
Ok(lua_value)
})?)?;
////Deprecated for fs:read
module.set("read_file", lua.create_function( |lua, path: String| {
let data = fs::read(path).map_err(|err| LuaError::external(err))?;
Ok(lua.create_string(&String::from_utf8_lossy(&data[..]).to_owned().to_string())?)
})?)?;
module.set("chdir", lua.create_function(|_, path: String| {
env::set_current_dir(path).map_err(LuaError::external)
})?)?;
module.set("current_dir", lua.create_function(|_, _:()| {
env::current_dir().map(|path| path.to_str().map(|s| s.to_string())).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:exists
module.set("exists", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).exists())
})?)?;
//Probably deprecate for path:is_file
module.set("is_file", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_file())
})?)?;
//Probably deprecate for path:is_dir
module.set("is_dir", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_dir())
})?)?;
module.set("symlink", lua.create_function( |_, (src_path, symlink_dest): (String, String)| {
create_symlink(src_path, symlink_dest).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:remove
module.set("remove_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
match all {
Some(true) => fs::remove_dir_all(&path).map_err(LuaError::external),
_ => fs::remove_dir(&path).map_err(LuaError::external)
}
})?)?;
//TODO: Rename to something suitable other than touch
//Probably deprecate for path:create_file
module.set("touch", lua.create_function( |_, path: String| {
fs::OpenOptions::new()
.write(true)
.create(true)
.open(&path)
.map(|_| ())
.map_err(LuaError::external)
})?)?;
module.set("copy_file", lua.create_function(|_, (src, dest): (String, String)| {
copy_file(src, dest)
})?)?;
// This binding has a known side effect that this doesn't copy .git directory
module.set("copy_dir", lua.create_function(|_, (src, dest): (String, String)| {
recursive_copy(src, dest).map_err(LuaError::external)
})?)?;
//Deprecated for fs:metadata
module.set("metadata", lua.create_function( |lua, path: String| {
match fs::metadata(path) {
Ok(md) => {
let table = lua.create_table()?;
table.set("type", {
let file_type = md.file_type();
if file_type.is_file() { "file" }
else if file_type.is_dir() { "directory" }
else { unreachable!() }
})?;
table.set("size", md.len())?;
// TODO: Unix permissions when in Unix
table.set("readonly", md.permissions().readonly())?;
table.set("created", md.created().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("accessed", md.accessed().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("modified", md.modified().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
Ok(Some(table))
},
_ => Ok(None)
}
})? )?;
lua.globals().set("fs", module)?;
Ok(())
}
//TODO: Have it set to use either `syslink_file` or `syslink_dir` depending on if the endpoint is a file or directory in the `src_path`
// Probably move functions into path binding.
#[cfg(target_family = "windows")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::windows::fs::symlink_file;
symlink_file(src_path, dest)
}
#[cfg(target_family = "unix")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::unix::fs::symlink;
symlink(src_path, dest)
}
fn copy_file<S: AsRef<Path>, D: AsRef<Path>>(src: S, dest: D) -> LuaResult<()> {
let mut dest = dest.as_ref().to_path_buf();
if dest.is_dir() {
let file_name = src.as_ref()
.file_name()
.map(|s| s.to_string_lossy().to_string())
.ok_or(LuaError::external(io::Error::from(io::ErrorKind::InvalidInput)))?;
dest.push(file_name);
};
fs::copy(src, dest).map(|_| ())
.map_err(LuaError::external)
}
fn recursive_copy<A: AsRef<Path>, B: AsRef<Path>>(src: A, dest: B) -> io::Result<()> |
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lua_fs () {
let lua = Lua::new();
init(&lua).unwrap();
lua.exec::<_, ()>(r#"
for entry in fs.entries("./") do
local md = fs.metadata(entry)
print(md.type .. ": " .. entry)
end
assert(fs.canonicalize("."), "expected path")
assert(fs.canonicalize("/no/such/path/here") == nil, "expected nil")
"#, None).unwrap();
}
}
| {
let path = src.as_ref();
if !src.as_ref().exists() {
return Err(io::Error::from(io::ErrorKind::NotFound));
}
if !dest.as_ref().exists() {
fs::create_dir(&dest)?;
}
for entry in path.read_dir()? {
let src = entry.map(|e| e.path())?;
let src_name = match src.file_name().map(|s| s.to_string_lossy().to_string()) {
Some(s) => s,
None => return Err(io::Error::from(io::ErrorKind::InvalidData))
};
let re = Regex::new(r"^\.git").unwrap();
// don't copy .git directory
if re.is_match(&src_name) {
continue;
}
let dest = dest.as_ref().join(src_name);
if src.is_file() {
fs::copy(src, &dest)?;
}
else {
fs::create_dir_all(&dest)?;
recursive_copy(src, &dest)?;
}
}
Ok(())
} | identifier_body |
fs.rs | use rlua::prelude::*;
use std::{
sync::Arc,
env,
fs::{self, OpenOptions},
io::{self, SeekFrom, prelude::*},
path::Path
};
use serde_json;
use rlua_serde;
use crate::bindings::system::LuaMetadata;
use regex::Regex;
//TODO: Move to having a common interface so IO can share the same binding
pub struct LuaFile(fs::File);
pub fn fs_open(_: &Lua, (path, mode): (String, Option<String>)) -> Result<LuaFile, LuaError> {
let mut option = OpenOptions::new();
if let Some(mode) = mode {
match mode.as_ref() {
"r" => option.read(true).write(false),
"w" => option.create(true).read(false).write(true),
"w+" => option.create(true).read(true).write(true).truncate(true),
"a" => option.append(true),
"rw" | _ => option.create(true).read(true).write(true),
};
} else {
option.create(true).read(true).write(true);
}
option.open(path)
.map(LuaFile)
.map_err(LuaError::external)
}
impl LuaUserData for LuaFile {
fn add_methods<'lua, M: LuaUserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method_mut("read", |_, this: &mut LuaFile, len: Option<usize>|{
let bytes = match len {
Some(len) => {
let mut bytes = vec![0u8; len];
this.0.read(&mut bytes).map_err(LuaError::external)?;
bytes
},
None => {
let mut bytes = vec![];
this.0.read_to_end(&mut bytes).map_err(LuaError::external)?;
bytes
}
};
Ok(bytes)
});
methods.add_method_mut("read_to_string", |_, this: &mut LuaFile, _: ()|{
let mut data = String::new();
this.0.read_to_string(&mut data).map_err(LuaError::external)?;
Ok(data)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, bytes: Vec<u8>|{
Ok(this.0.write(bytes.as_slice()).map_err(LuaError::external)?)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, str: String|{
Ok(this.0.write(str.as_bytes()).map_err(LuaError::external)?)
});
methods.add_method_mut("flush", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.flush().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_all", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_all().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_data", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_data().map_err(LuaError::external)?)
});
methods.add_method("metadata", |_, this: &LuaFile, _: ()| {
Ok(LuaMetadata(this.0.metadata().map_err(LuaError::external)?))
});
methods.add_method_mut("seek", |_, this: &mut LuaFile, (pos, size): (Option<String>, Option<usize>)| {
let size = size.unwrap_or(0);
let seekfrom = pos.and_then(|s_pos| {
Some(match s_pos.as_ref() {
"start" => SeekFrom::Start(size as u64),
"end" => SeekFrom::End(size as i64),
"current" | _ => SeekFrom::Current(size as i64),
})
}).unwrap_or(SeekFrom::Current(size as i64));
Ok(this.0.seek(seekfrom).map_err(LuaError::external)?)
});
}
}
pub fn init(lua: &Lua) -> crate::Result<()> {
let module = lua.create_table()?;
module.set("open", lua.create_function( fs_open)? )?;
module.set("canonicalize", lua.create_function( |lua, path: String| {
match fs::canonicalize(path).map_err(|err| LuaError::external(err)) {
Ok(i) => Ok(Some(lua.create_string(&i.to_str().unwrap()).unwrap())),
_ => Ok(None)
}
})? )?;
//Deprecated for path:create_dir
module.set("create_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
let result = match all {
Some(true) => fs::create_dir_all(path),
_ => fs::create_dir(path)
};
Ok(result.is_ok())
})? )?;
//Deprecated for path:read_dir
module.set("entries", lua.create_function( |lua, path: String| {
match fs::read_dir(path) {
Ok(iter) => {
let mut arc_iter = Arc::new(Some(iter));
let f = move |_, _: ()| {
let result = match Arc::get_mut(&mut arc_iter).expect("entries iterator is mutably borrowed") {
Some(iter) => match iter.next() {
Some(Ok(entry)) => Some(entry.file_name().into_string().unwrap()),
_ => None
},
None => None
};
if result.is_none() { *Arc::get_mut(&mut arc_iter).unwrap() = None; }
Ok(result)
};
Ok(lua.create_function_mut(f)?)
}, Err(err) => Err(LuaError::ExternalError(Arc::new(::failure::Error::from_boxed_compat(Box::new(err)))))
}
})? )?;
module.set("read_dir", lua.create_function( |lua, path: String| {
let mut _list: Vec<String> = Vec::new();
for entry in fs::read_dir(path).map_err(|err| LuaError::external(err))? {
let entry = entry.map_err(|err| LuaError::external(err))?;
_list.push(entry.path().file_name().unwrap_or_default().to_string_lossy().to_string());
}
let list_value: serde_json::Value = serde_json::to_value(_list).map_err(|err| LuaError::external(err) )?;
let lua_value = rlua_serde::to_value(lua, &list_value)?;
Ok(lua_value)
})?)?;
////Deprecated for fs:read
module.set("read_file", lua.create_function( |lua, path: String| {
let data = fs::read(path).map_err(|err| LuaError::external(err))?;
Ok(lua.create_string(&String::from_utf8_lossy(&data[..]).to_owned().to_string())?)
})?)?;
module.set("chdir", lua.create_function(|_, path: String| {
env::set_current_dir(path).map_err(LuaError::external)
})?)?;
module.set("current_dir", lua.create_function(|_, _:()| {
env::current_dir().map(|path| path.to_str().map(|s| s.to_string())).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:exists
module.set("exists", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).exists())
})?)?;
//Probably deprecate for path:is_file
module.set("is_file", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_file())
})?)?;
//Probably deprecate for path:is_dir
module.set("is_dir", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_dir())
})?)?;
module.set("symlink", lua.create_function( |_, (src_path, symlink_dest): (String, String)| {
create_symlink(src_path, symlink_dest).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:remove
module.set("remove_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
match all {
Some(true) => fs::remove_dir_all(&path).map_err(LuaError::external),
_ => fs::remove_dir(&path).map_err(LuaError::external)
}
})?)?;
//TODO: Rename to something suitable other than touch
//Probably deprecate for path:create_file
module.set("touch", lua.create_function( |_, path: String| {
fs::OpenOptions::new()
.write(true)
.create(true)
.open(&path)
.map(|_| ())
.map_err(LuaError::external)
})?)?;
module.set("copy_file", lua.create_function(|_, (src, dest): (String, String)| {
copy_file(src, dest)
})?)?;
// This binding has a known side effect that this doesn't copy .git directory
module.set("copy_dir", lua.create_function(|_, (src, dest): (String, String)| {
recursive_copy(src, dest).map_err(LuaError::external)
})?)?;
//Deprecated for fs:metadata
module.set("metadata", lua.create_function( |lua, path: String| {
match fs::metadata(path) {
Ok(md) => {
let table = lua.create_table()?;
table.set("type", {
let file_type = md.file_type();
if file_type.is_file() { "file" }
else if file_type.is_dir() { "directory" }
else { unreachable!() }
})?;
table.set("size", md.len())?;
// TODO: Unix permissions when in Unix
table.set("readonly", md.permissions().readonly())?;
table.set("created", md.created().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("accessed", md.accessed().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("modified", md.modified().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
Ok(Some(table))
},
_ => Ok(None)
}
})? )?;
lua.globals().set("fs", module)?;
Ok(())
}
//TODO: Have it set to use either `syslink_file` or `syslink_dir` depending on if the endpoint is a file or directory in the `src_path`
// Probably move functions into path binding.
#[cfg(target_family = "windows")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::windows::fs::symlink_file;
symlink_file(src_path, dest)
}
#[cfg(target_family = "unix")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::unix::fs::symlink;
symlink(src_path, dest)
}
fn copy_file<S: AsRef<Path>, D: AsRef<Path>>(src: S, dest: D) -> LuaResult<()> {
let mut dest = dest.as_ref().to_path_buf();
if dest.is_dir() {
let file_name = src.as_ref()
.file_name()
.map(|s| s.to_string_lossy().to_string())
.ok_or(LuaError::external(io::Error::from(io::ErrorKind::InvalidInput)))?;
dest.push(file_name);
};
fs::copy(src, dest).map(|_| ())
.map_err(LuaError::external)
}
fn | <A: AsRef<Path>, B: AsRef<Path>>(src: A, dest: B) -> io::Result<()> {
let path = src.as_ref();
if !src.as_ref().exists() {
return Err(io::Error::from(io::ErrorKind::NotFound));
}
if !dest.as_ref().exists() {
fs::create_dir(&dest)?;
}
for entry in path.read_dir()? {
let src = entry.map(|e| e.path())?;
let src_name = match src.file_name().map(|s| s.to_string_lossy().to_string()) {
Some(s) => s,
None => return Err(io::Error::from(io::ErrorKind::InvalidData))
};
let re = Regex::new(r"^\.git").unwrap();
// don't copy .git directory
if re.is_match(&src_name) {
continue;
}
let dest = dest.as_ref().join(src_name);
if src.is_file() {
fs::copy(src, &dest)?;
}
else {
fs::create_dir_all(&dest)?;
recursive_copy(src, &dest)?;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lua_fs () {
let lua = Lua::new();
init(&lua).unwrap();
lua.exec::<_, ()>(r#"
for entry in fs.entries("./") do
local md = fs.metadata(entry)
print(md.type .. ": " .. entry)
end
assert(fs.canonicalize("."), "expected path")
assert(fs.canonicalize("/no/such/path/here") == nil, "expected nil")
"#, None).unwrap();
}
}
| recursive_copy | identifier_name |
fs.rs | use rlua::prelude::*;
use std::{
sync::Arc,
env,
fs::{self, OpenOptions},
io::{self, SeekFrom, prelude::*},
path::Path
};
use serde_json;
use rlua_serde;
use crate::bindings::system::LuaMetadata;
use regex::Regex;
//TODO: Move to having a common interface so IO can share the same binding
pub struct LuaFile(fs::File);
pub fn fs_open(_: &Lua, (path, mode): (String, Option<String>)) -> Result<LuaFile, LuaError> {
let mut option = OpenOptions::new();
if let Some(mode) = mode {
match mode.as_ref() {
"r" => option.read(true).write(false),
"w" => option.create(true).read(false).write(true),
"w+" => option.create(true).read(true).write(true).truncate(true),
"a" => option.append(true),
"rw" | _ => option.create(true).read(true).write(true),
};
} else {
option.create(true).read(true).write(true);
}
option.open(path)
.map(LuaFile)
.map_err(LuaError::external)
}
impl LuaUserData for LuaFile {
fn add_methods<'lua, M: LuaUserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method_mut("read", |_, this: &mut LuaFile, len: Option<usize>|{
let bytes = match len {
Some(len) => {
let mut bytes = vec![0u8; len];
this.0.read(&mut bytes).map_err(LuaError::external)?;
bytes
},
None => {
let mut bytes = vec![];
this.0.read_to_end(&mut bytes).map_err(LuaError::external)?;
bytes
}
};
Ok(bytes)
});
methods.add_method_mut("read_to_string", |_, this: &mut LuaFile, _: ()|{
let mut data = String::new();
this.0.read_to_string(&mut data).map_err(LuaError::external)?;
Ok(data)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, bytes: Vec<u8>|{
Ok(this.0.write(bytes.as_slice()).map_err(LuaError::external)?)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, str: String|{
Ok(this.0.write(str.as_bytes()).map_err(LuaError::external)?)
});
methods.add_method_mut("flush", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.flush().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_all", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_all().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_data", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_data().map_err(LuaError::external)?)
});
methods.add_method("metadata", |_, this: &LuaFile, _: ()| {
Ok(LuaMetadata(this.0.metadata().map_err(LuaError::external)?))
});
methods.add_method_mut("seek", |_, this: &mut LuaFile, (pos, size): (Option<String>, Option<usize>)| {
let size = size.unwrap_or(0);
let seekfrom = pos.and_then(|s_pos| {
Some(match s_pos.as_ref() {
"start" => SeekFrom::Start(size as u64),
"end" => SeekFrom::End(size as i64),
"current" | _ => SeekFrom::Current(size as i64),
})
}).unwrap_or(SeekFrom::Current(size as i64));
Ok(this.0.seek(seekfrom).map_err(LuaError::external)?)
});
}
}
pub fn init(lua: &Lua) -> crate::Result<()> {
let module = lua.create_table()?;
module.set("open", lua.create_function( fs_open)? )?;
module.set("canonicalize", lua.create_function( |lua, path: String| {
match fs::canonicalize(path).map_err(|err| LuaError::external(err)) {
Ok(i) => Ok(Some(lua.create_string(&i.to_str().unwrap()).unwrap())),
_ => Ok(None)
}
})? )?;
//Deprecated for path:create_dir
module.set("create_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
let result = match all {
Some(true) => fs::create_dir_all(path),
_ => fs::create_dir(path)
};
Ok(result.is_ok())
})? )?;
//Deprecated for path:read_dir
module.set("entries", lua.create_function( |lua, path: String| {
match fs::read_dir(path) {
Ok(iter) => {
let mut arc_iter = Arc::new(Some(iter));
let f = move |_, _: ()| {
let result = match Arc::get_mut(&mut arc_iter).expect("entries iterator is mutably borrowed") {
Some(iter) => match iter.next() {
Some(Ok(entry)) => Some(entry.file_name().into_string().unwrap()),
_ => None
},
None => None
};
if result.is_none() { *Arc::get_mut(&mut arc_iter).unwrap() = None; }
Ok(result)
};
Ok(lua.create_function_mut(f)?)
}, Err(err) => Err(LuaError::ExternalError(Arc::new(::failure::Error::from_boxed_compat(Box::new(err)))))
}
})? )?;
module.set("read_dir", lua.create_function( |lua, path: String| {
let mut _list: Vec<String> = Vec::new();
for entry in fs::read_dir(path).map_err(|err| LuaError::external(err))? {
let entry = entry.map_err(|err| LuaError::external(err))?;
_list.push(entry.path().file_name().unwrap_or_default().to_string_lossy().to_string());
}
let list_value: serde_json::Value = serde_json::to_value(_list).map_err(|err| LuaError::external(err) )?;
let lua_value = rlua_serde::to_value(lua, &list_value)?;
Ok(lua_value)
})?)?;
////Deprecated for fs:read
module.set("read_file", lua.create_function( |lua, path: String| {
let data = fs::read(path).map_err(|err| LuaError::external(err))?;
Ok(lua.create_string(&String::from_utf8_lossy(&data[..]).to_owned().to_string())?)
})?)?;
module.set("chdir", lua.create_function(|_, path: String| {
env::set_current_dir(path).map_err(LuaError::external)
})?)?;
module.set("current_dir", lua.create_function(|_, _:()| {
env::current_dir().map(|path| path.to_str().map(|s| s.to_string())).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:exists
module.set("exists", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).exists())
})?)?;
//Probably deprecate for path:is_file
module.set("is_file", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_file())
})?)?;
//Probably deprecate for path:is_dir
module.set("is_dir", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_dir())
})?)?;
module.set("symlink", lua.create_function( |_, (src_path, symlink_dest): (String, String)| {
create_symlink(src_path, symlink_dest).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:remove
module.set("remove_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
match all {
Some(true) => fs::remove_dir_all(&path).map_err(LuaError::external),
_ => fs::remove_dir(&path).map_err(LuaError::external)
}
})?)?;
| fs::OpenOptions::new()
.write(true)
.create(true)
.open(&path)
.map(|_| ())
.map_err(LuaError::external)
})?)?;
module.set("copy_file", lua.create_function(|_, (src, dest): (String, String)| {
copy_file(src, dest)
})?)?;
// This binding has a known side effect that this doesn't copy .git directory
module.set("copy_dir", lua.create_function(|_, (src, dest): (String, String)| {
recursive_copy(src, dest).map_err(LuaError::external)
})?)?;
//Deprecated for fs:metadata
module.set("metadata", lua.create_function( |lua, path: String| {
match fs::metadata(path) {
Ok(md) => {
let table = lua.create_table()?;
table.set("type", {
let file_type = md.file_type();
if file_type.is_file() { "file" }
else if file_type.is_dir() { "directory" }
else { unreachable!() }
})?;
table.set("size", md.len())?;
// TODO: Unix permissions when in Unix
table.set("readonly", md.permissions().readonly())?;
table.set("created", md.created().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("accessed", md.accessed().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("modified", md.modified().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
Ok(Some(table))
},
_ => Ok(None)
}
})? )?;
lua.globals().set("fs", module)?;
Ok(())
}
//TODO: Have it set to use either `syslink_file` or `syslink_dir` depending on if the endpoint is a file or directory in the `src_path`
// Probably move functions into path binding.
#[cfg(target_family = "windows")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::windows::fs::symlink_file;
symlink_file(src_path, dest)
}
#[cfg(target_family = "unix")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::unix::fs::symlink;
symlink(src_path, dest)
}
fn copy_file<S: AsRef<Path>, D: AsRef<Path>>(src: S, dest: D) -> LuaResult<()> {
let mut dest = dest.as_ref().to_path_buf();
if dest.is_dir() {
let file_name = src.as_ref()
.file_name()
.map(|s| s.to_string_lossy().to_string())
.ok_or(LuaError::external(io::Error::from(io::ErrorKind::InvalidInput)))?;
dest.push(file_name);
};
fs::copy(src, dest).map(|_| ())
.map_err(LuaError::external)
}
fn recursive_copy<A: AsRef<Path>, B: AsRef<Path>>(src: A, dest: B) -> io::Result<()> {
let path = src.as_ref();
if !src.as_ref().exists() {
return Err(io::Error::from(io::ErrorKind::NotFound));
}
if !dest.as_ref().exists() {
fs::create_dir(&dest)?;
}
for entry in path.read_dir()? {
let src = entry.map(|e| e.path())?;
let src_name = match src.file_name().map(|s| s.to_string_lossy().to_string()) {
Some(s) => s,
None => return Err(io::Error::from(io::ErrorKind::InvalidData))
};
let re = Regex::new(r"^\.git").unwrap();
// don't copy .git directory
if re.is_match(&src_name) {
continue;
}
let dest = dest.as_ref().join(src_name);
if src.is_file() {
fs::copy(src, &dest)?;
}
else {
fs::create_dir_all(&dest)?;
recursive_copy(src, &dest)?;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lua_fs () {
let lua = Lua::new();
init(&lua).unwrap();
lua.exec::<_, ()>(r#"
for entry in fs.entries("./") do
local md = fs.metadata(entry)
print(md.type .. ": " .. entry)
end
assert(fs.canonicalize("."), "expected path")
assert(fs.canonicalize("/no/such/path/here") == nil, "expected nil")
"#, None).unwrap();
}
} | //TODO: Rename to something suitable other than touch
//Probably deprecate for path:create_file
module.set("touch", lua.create_function( |_, path: String| { | random_line_split |
centrify_desktopapp.go | package centrify
import (
"errors"
"github.com/centrify/terraform-provider/cloud-golang-sdk/restapi"
)
// DesktopApp - Encapsulates a single Generic DesktopApp
type DesktopApp struct {
vaultObject
TemplateName string `json:"TemplateName,omitempty" schema:"template_name,omitempty"`
DesktopAppRunHostID string `json:"DesktopAppRunHostId,omitempty" schema:"application_host_id,omitempty"` // Application host
DesktopAppRunAccountType string `json:"DesktopAppRunAccountType,omitempty" schema:"login_credential_type,omitempty"` // Host login credential type: ADCredential, SetByUser, AlternativeAccount, SharedAccount
DesktopAppRunAccountID string `json:"DesktopAppRunAccountUuid,omitempty" schema:"application_account_id,omitempty"` // Host login credential account
DesktopAppProgramName string `json:"DesktopAppProgramName,omitempty" schema:"application_alias,omitempty"` // Application alias
DesktopAppCmdline string `json:"DesktopAppCmdlineTemplate,omitempty" schema:"command_line,omitempty"` // Command line
DesktopAppParams []DesktopAppParam `json:"DesktopAppParams,omitempty" schema:"command_parameter,omitempty"`
DefaultAuthProfile string `json:"DefaultAuthProfile" schema:"default_profile_id"`
ChallengeRules *ChallengeRules `json:"AuthRules,omitempty" schema:"challenge_rule,omitempty"`
PolicyScript string `json:"PolicyScript,omitempty" schema:"policy_script,omitempty"` // Use script to specify authentication rules (configured rules are ignored)
WorkflowEnabled bool `json:"WorkflowEnabled,omitempty" schema:"workflow_enabled,omitempty"`
}
// DesktopAppParam - desktop app command line parameters
type DesktopAppParam struct {
ParamName string `json:"ParamName,omitempty" schema:"name,omitempty"`
ParamType string `json:"ParamType,omitempty" schema:"type,omitempty"` // int, date, string, User, Role, Device, Server, VaultAccount, VaultDomain, VaultDatabase, Subscriptions, DataVault, SshKeys, system_profile
ParamValue string `json:"ParamValue,omitempty" schema:"value,omitempty"`
TargetObjectID string `json:"TargetObjectId,omitempty" schema:"target_object_id,omitempty"`
}
// NewDesktopApp is a esktopApp constructor
func NewDesktopApp(c *restapi.RestClient) *DesktopApp {
s := DesktopApp{}
s.client = c
s.apiRead = "/SaasManage/GetApplication"
s.apiCreate = "/SaasManage/ImportAppFromTemplate"
s.apiDelete = "/SaasManage/DeleteApplication"
s.apiUpdate = "/SaasManage/UpdateApplicationDE"
s.apiPermissions = "/SaasManage/SetApplicationPermissions"
return &s
}
// Read function fetches a DesktopApp from source, including attribute values. Returns error if any
func (o *DesktopApp) Read() error {
if o.ID == "" {
return errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = o.ID
// Attempt to read from an upstream API
resp, err := o.client.CallGenericMapAPI(o.apiRead, queryArg)
if err != nil {
return err
}
if !resp.Success {
return errors.New(resp.Message)
}
fillWithMap(o, resp.Result)
LogD.Printf("Filled object: %+v", o)
return nil
}
// Create function creates a new DesktopApp and returns a map that contains creation result
func (o *DesktopApp) Create() (*restapi.SliceResponse, error) {
var queryArg = make(map[string]interface{})
queryArg["ID"] = []string{o.TemplateName}
LogD.Printf("Generated Map for Create(): %+v", queryArg)
resp, err := o.client.CallSliceAPI(o.apiCreate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Update function updates an existing DesktopApp and returns a map that contains update result
func (o *DesktopApp) Update() (*restapi.GenericMapResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg, err := generateRequestMap(o)
if err != nil {
return nil, err
}
queryArg["_RowKey"] = o.ID
LogD.Printf("Generated Map for Update(): %+v", queryArg)
resp, err := o.client.CallGenericMapAPI(o.apiUpdate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Delete function deletes a DesktopApp and returns a map that contains deletion result
func (o *DesktopApp) Delete() (*restapi.SliceResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = []string{o.ID}
resp, err := o.client.CallSliceAPI(o.apiDelete, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Query function returns a single DesktopApp object in map format
func (o *DesktopApp) | () (map[string]interface{}, error) {
query := "SELECT * FROM Application WHERE 1=1 AND AppType='Desktop'"
if o.Name != "" {
query += " AND Name='" + o.Name + "'"
}
return queryVaultObject(o.client, query)
}
/*
Fetch desktop app
https://developer.centrify.com/reference#post_saasmanage-getapplication
Request body format
{
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RRFormat": true,
"Args": {
"PageNumber": 1,
"Limit": 1,
"PageSize": 1,
"Caching": -1
}
}
Respond result
{
"success": true,
"Result": {
"IsTestApp": false,
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DisplayName": "AirWatch ONE UEM",
"UseDefaultSigningCert": true,
"_entitycontext": "W/\"datetime'2020-06-17T09%3A24%3A09.4903113Z'\"",
"_TableName": "application",
"Generic": true,
"LocalizationMappings": [
...
],
"State": "Active",
"RegistrationLinkMessage": null,
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppProgramName": "pas_desktopapp",
"_encryptkeyid": "XXXXXXX",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"_PartitionKey": "XXXXXX",
"RemoteDesktopUser": "shared_account (demo.lab)",
"CertificateSubjectName": "CN=Centrify Customer Application Signing Certificate",
"ParentDisplayName": null,
"_metadata": {
"Version": 1,
"IndexingVersion": 1
},
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"DesktopAppType": "CommandLine",
"AuthRules": {
"_UniqueKey": "Condition",
"_Value": [],
"Enabled": true,
"_Type": "RowSet"
},
"AppType": "Desktop",
"Name": "AirWatch ONE UEM",
"Thumbprint": "XXXXXXXXXXXXXXXXX",
"TemplateName": "GenericDesktopApplication",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"DefaultAuthProfile": "AlwaysAllowed",
"AppTypeDisplayName": "Desktop",
"AuthChallengeDefinitionId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RegistrationMessage": null,
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849490)/",
"ProvCapable": false,
"AdminTag": "Other",
"ProvSettingEnabled": false,
"ProvConfigured": false,
"ACL": "true",
"Category": "Other",
"LocalizationEnabled": false,
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849244)/",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
]
},
"IsSoftError": false
}
Create desktop app
https://developer.centrify.com/reference#post_saasmanage-importappfromtemplate
Request body format
{
"ID": [
"GenericDesktopApplication" |"Ssms"|"Toad"|"VpxClient"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"ID": "GenericDesktopApplication",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Update desktop app
https://developer.centrify.com/reference#post_saasmanage-updateapplicationde
Request body format
{
"LocalizationEnabled": false,
"LocalizationMappings": [
...
],
"Name": "AirWatch ONE UEM",
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"IconUri": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppAccountContainerId": "",
"DesktopAppAccountUuid": "",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"RemoteDesktopUser": "shared_account (demo.lab)",
"DesktopAppProgramName": "pas_desktopapp",
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "2020-06-17T09:24:09.244Z",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
],
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
Respond result
{
"success": true,
"Result": {
"State": 0
},
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Delete desktop app
https://developer.centrify.com/reference#post_saasmanage-deleteapplication
Request body format
{
"_RowKey": [
"xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
*/
| Query | identifier_name |
centrify_desktopapp.go | package centrify
import (
"errors"
"github.com/centrify/terraform-provider/cloud-golang-sdk/restapi"
)
// DesktopApp - Encapsulates a single Generic DesktopApp
type DesktopApp struct {
vaultObject
TemplateName string `json:"TemplateName,omitempty" schema:"template_name,omitempty"`
DesktopAppRunHostID string `json:"DesktopAppRunHostId,omitempty" schema:"application_host_id,omitempty"` // Application host
DesktopAppRunAccountType string `json:"DesktopAppRunAccountType,omitempty" schema:"login_credential_type,omitempty"` // Host login credential type: ADCredential, SetByUser, AlternativeAccount, SharedAccount
DesktopAppRunAccountID string `json:"DesktopAppRunAccountUuid,omitempty" schema:"application_account_id,omitempty"` // Host login credential account
DesktopAppProgramName string `json:"DesktopAppProgramName,omitempty" schema:"application_alias,omitempty"` // Application alias
DesktopAppCmdline string `json:"DesktopAppCmdlineTemplate,omitempty" schema:"command_line,omitempty"` // Command line
DesktopAppParams []DesktopAppParam `json:"DesktopAppParams,omitempty" schema:"command_parameter,omitempty"`
DefaultAuthProfile string `json:"DefaultAuthProfile" schema:"default_profile_id"`
ChallengeRules *ChallengeRules `json:"AuthRules,omitempty" schema:"challenge_rule,omitempty"`
PolicyScript string `json:"PolicyScript,omitempty" schema:"policy_script,omitempty"` // Use script to specify authentication rules (configured rules are ignored)
WorkflowEnabled bool `json:"WorkflowEnabled,omitempty" schema:"workflow_enabled,omitempty"`
}
// DesktopAppParam - desktop app command line parameters
type DesktopAppParam struct {
ParamName string `json:"ParamName,omitempty" schema:"name,omitempty"`
ParamType string `json:"ParamType,omitempty" schema:"type,omitempty"` // int, date, string, User, Role, Device, Server, VaultAccount, VaultDomain, VaultDatabase, Subscriptions, DataVault, SshKeys, system_profile
ParamValue string `json:"ParamValue,omitempty" schema:"value,omitempty"`
TargetObjectID string `json:"TargetObjectId,omitempty" schema:"target_object_id,omitempty"`
}
// NewDesktopApp is a esktopApp constructor
func NewDesktopApp(c *restapi.RestClient) *DesktopApp {
s := DesktopApp{}
s.client = c
s.apiRead = "/SaasManage/GetApplication"
s.apiCreate = "/SaasManage/ImportAppFromTemplate"
s.apiDelete = "/SaasManage/DeleteApplication"
s.apiUpdate = "/SaasManage/UpdateApplicationDE"
s.apiPermissions = "/SaasManage/SetApplicationPermissions"
return &s
}
// Read function fetches a DesktopApp from source, including attribute values. Returns error if any
func (o *DesktopApp) Read() error {
if o.ID == "" {
return errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = o.ID
// Attempt to read from an upstream API
resp, err := o.client.CallGenericMapAPI(o.apiRead, queryArg)
if err != nil {
return err
}
if !resp.Success {
return errors.New(resp.Message)
}
fillWithMap(o, resp.Result)
LogD.Printf("Filled object: %+v", o)
return nil
}
// Create function creates a new DesktopApp and returns a map that contains creation result
func (o *DesktopApp) Create() (*restapi.SliceResponse, error) {
var queryArg = make(map[string]interface{})
queryArg["ID"] = []string{o.TemplateName}
LogD.Printf("Generated Map for Create(): %+v", queryArg)
resp, err := o.client.CallSliceAPI(o.apiCreate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Update function updates an existing DesktopApp and returns a map that contains update result
func (o *DesktopApp) Update() (*restapi.GenericMapResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg, err := generateRequestMap(o)
if err != nil {
return nil, err
}
queryArg["_RowKey"] = o.ID
LogD.Printf("Generated Map for Update(): %+v", queryArg)
resp, err := o.client.CallGenericMapAPI(o.apiUpdate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Delete function deletes a DesktopApp and returns a map that contains deletion result
func (o *DesktopApp) Delete() (*restapi.SliceResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = []string{o.ID}
resp, err := o.client.CallSliceAPI(o.apiDelete, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Query function returns a single DesktopApp object in map format
func (o *DesktopApp) Query() (map[string]interface{}, error) {
query := "SELECT * FROM Application WHERE 1=1 AND AppType='Desktop'"
if o.Name != "" {
query += " AND Name='" + o.Name + "'"
}
return queryVaultObject(o.client, query)
}
/*
Fetch desktop app
https://developer.centrify.com/reference#post_saasmanage-getapplication
Request body format
{
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RRFormat": true,
"Args": {
"PageNumber": 1,
"Limit": 1,
"PageSize": 1,
"Caching": -1
}
}
Respond result
{
"success": true,
"Result": {
"IsTestApp": false,
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DisplayName": "AirWatch ONE UEM",
"UseDefaultSigningCert": true,
"_entitycontext": "W/\"datetime'2020-06-17T09%3A24%3A09.4903113Z'\"",
"_TableName": "application",
"Generic": true,
"LocalizationMappings": [
...
],
"State": "Active",
"RegistrationLinkMessage": null,
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppProgramName": "pas_desktopapp",
"_encryptkeyid": "XXXXXXX",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"_PartitionKey": "XXXXXX", | "Version": 1,
"IndexingVersion": 1
},
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"DesktopAppType": "CommandLine",
"AuthRules": {
"_UniqueKey": "Condition",
"_Value": [],
"Enabled": true,
"_Type": "RowSet"
},
"AppType": "Desktop",
"Name": "AirWatch ONE UEM",
"Thumbprint": "XXXXXXXXXXXXXXXXX",
"TemplateName": "GenericDesktopApplication",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"DefaultAuthProfile": "AlwaysAllowed",
"AppTypeDisplayName": "Desktop",
"AuthChallengeDefinitionId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RegistrationMessage": null,
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849490)/",
"ProvCapable": false,
"AdminTag": "Other",
"ProvSettingEnabled": false,
"ProvConfigured": false,
"ACL": "true",
"Category": "Other",
"LocalizationEnabled": false,
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849244)/",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
]
},
"IsSoftError": false
}
Create desktop app
https://developer.centrify.com/reference#post_saasmanage-importappfromtemplate
Request body format
{
"ID": [
"GenericDesktopApplication" |"Ssms"|"Toad"|"VpxClient"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"ID": "GenericDesktopApplication",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Update desktop app
https://developer.centrify.com/reference#post_saasmanage-updateapplicationde
Request body format
{
"LocalizationEnabled": false,
"LocalizationMappings": [
...
],
"Name": "AirWatch ONE UEM",
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"IconUri": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppAccountContainerId": "",
"DesktopAppAccountUuid": "",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"RemoteDesktopUser": "shared_account (demo.lab)",
"DesktopAppProgramName": "pas_desktopapp",
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "2020-06-17T09:24:09.244Z",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
],
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
Respond result
{
"success": true,
"Result": {
"State": 0
},
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Delete desktop app
https://developer.centrify.com/reference#post_saasmanage-deleteapplication
Request body format
{
"_RowKey": [
"xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
*/ | "RemoteDesktopUser": "shared_account (demo.lab)",
"CertificateSubjectName": "CN=Centrify Customer Application Signing Certificate",
"ParentDisplayName": null,
"_metadata": { | random_line_split |
centrify_desktopapp.go | package centrify
import (
"errors"
"github.com/centrify/terraform-provider/cloud-golang-sdk/restapi"
)
// DesktopApp - Encapsulates a single Generic DesktopApp
type DesktopApp struct {
vaultObject
TemplateName string `json:"TemplateName,omitempty" schema:"template_name,omitempty"`
DesktopAppRunHostID string `json:"DesktopAppRunHostId,omitempty" schema:"application_host_id,omitempty"` // Application host
DesktopAppRunAccountType string `json:"DesktopAppRunAccountType,omitempty" schema:"login_credential_type,omitempty"` // Host login credential type: ADCredential, SetByUser, AlternativeAccount, SharedAccount
DesktopAppRunAccountID string `json:"DesktopAppRunAccountUuid,omitempty" schema:"application_account_id,omitempty"` // Host login credential account
DesktopAppProgramName string `json:"DesktopAppProgramName,omitempty" schema:"application_alias,omitempty"` // Application alias
DesktopAppCmdline string `json:"DesktopAppCmdlineTemplate,omitempty" schema:"command_line,omitempty"` // Command line
DesktopAppParams []DesktopAppParam `json:"DesktopAppParams,omitempty" schema:"command_parameter,omitempty"`
DefaultAuthProfile string `json:"DefaultAuthProfile" schema:"default_profile_id"`
ChallengeRules *ChallengeRules `json:"AuthRules,omitempty" schema:"challenge_rule,omitempty"`
PolicyScript string `json:"PolicyScript,omitempty" schema:"policy_script,omitempty"` // Use script to specify authentication rules (configured rules are ignored)
WorkflowEnabled bool `json:"WorkflowEnabled,omitempty" schema:"workflow_enabled,omitempty"`
}
// DesktopAppParam - desktop app command line parameters
type DesktopAppParam struct {
ParamName string `json:"ParamName,omitempty" schema:"name,omitempty"`
ParamType string `json:"ParamType,omitempty" schema:"type,omitempty"` // int, date, string, User, Role, Device, Server, VaultAccount, VaultDomain, VaultDatabase, Subscriptions, DataVault, SshKeys, system_profile
ParamValue string `json:"ParamValue,omitempty" schema:"value,omitempty"`
TargetObjectID string `json:"TargetObjectId,omitempty" schema:"target_object_id,omitempty"`
}
// NewDesktopApp is a esktopApp constructor
func NewDesktopApp(c *restapi.RestClient) *DesktopApp {
s := DesktopApp{}
s.client = c
s.apiRead = "/SaasManage/GetApplication"
s.apiCreate = "/SaasManage/ImportAppFromTemplate"
s.apiDelete = "/SaasManage/DeleteApplication"
s.apiUpdate = "/SaasManage/UpdateApplicationDE"
s.apiPermissions = "/SaasManage/SetApplicationPermissions"
return &s
}
// Read function fetches a DesktopApp from source, including attribute values. Returns error if any
func (o *DesktopApp) Read() error {
if o.ID == "" {
return errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = o.ID
// Attempt to read from an upstream API
resp, err := o.client.CallGenericMapAPI(o.apiRead, queryArg)
if err != nil |
if !resp.Success {
return errors.New(resp.Message)
}
fillWithMap(o, resp.Result)
LogD.Printf("Filled object: %+v", o)
return nil
}
// Create function creates a new DesktopApp and returns a map that contains creation result
func (o *DesktopApp) Create() (*restapi.SliceResponse, error) {
var queryArg = make(map[string]interface{})
queryArg["ID"] = []string{o.TemplateName}
LogD.Printf("Generated Map for Create(): %+v", queryArg)
resp, err := o.client.CallSliceAPI(o.apiCreate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Update function updates an existing DesktopApp and returns a map that contains update result
func (o *DesktopApp) Update() (*restapi.GenericMapResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg, err := generateRequestMap(o)
if err != nil {
return nil, err
}
queryArg["_RowKey"] = o.ID
LogD.Printf("Generated Map for Update(): %+v", queryArg)
resp, err := o.client.CallGenericMapAPI(o.apiUpdate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Delete function deletes a DesktopApp and returns a map that contains deletion result
func (o *DesktopApp) Delete() (*restapi.SliceResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = []string{o.ID}
resp, err := o.client.CallSliceAPI(o.apiDelete, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Query function returns a single DesktopApp object in map format
func (o *DesktopApp) Query() (map[string]interface{}, error) {
query := "SELECT * FROM Application WHERE 1=1 AND AppType='Desktop'"
if o.Name != "" {
query += " AND Name='" + o.Name + "'"
}
return queryVaultObject(o.client, query)
}
/*
Fetch desktop app
https://developer.centrify.com/reference#post_saasmanage-getapplication
Request body format
{
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RRFormat": true,
"Args": {
"PageNumber": 1,
"Limit": 1,
"PageSize": 1,
"Caching": -1
}
}
Respond result
{
"success": true,
"Result": {
"IsTestApp": false,
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DisplayName": "AirWatch ONE UEM",
"UseDefaultSigningCert": true,
"_entitycontext": "W/\"datetime'2020-06-17T09%3A24%3A09.4903113Z'\"",
"_TableName": "application",
"Generic": true,
"LocalizationMappings": [
...
],
"State": "Active",
"RegistrationLinkMessage": null,
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppProgramName": "pas_desktopapp",
"_encryptkeyid": "XXXXXXX",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"_PartitionKey": "XXXXXX",
"RemoteDesktopUser": "shared_account (demo.lab)",
"CertificateSubjectName": "CN=Centrify Customer Application Signing Certificate",
"ParentDisplayName": null,
"_metadata": {
"Version": 1,
"IndexingVersion": 1
},
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"DesktopAppType": "CommandLine",
"AuthRules": {
"_UniqueKey": "Condition",
"_Value": [],
"Enabled": true,
"_Type": "RowSet"
},
"AppType": "Desktop",
"Name": "AirWatch ONE UEM",
"Thumbprint": "XXXXXXXXXXXXXXXXX",
"TemplateName": "GenericDesktopApplication",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"DefaultAuthProfile": "AlwaysAllowed",
"AppTypeDisplayName": "Desktop",
"AuthChallengeDefinitionId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RegistrationMessage": null,
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849490)/",
"ProvCapable": false,
"AdminTag": "Other",
"ProvSettingEnabled": false,
"ProvConfigured": false,
"ACL": "true",
"Category": "Other",
"LocalizationEnabled": false,
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849244)/",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
]
},
"IsSoftError": false
}
Create desktop app
https://developer.centrify.com/reference#post_saasmanage-importappfromtemplate
Request body format
{
"ID": [
"GenericDesktopApplication" |"Ssms"|"Toad"|"VpxClient"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"ID": "GenericDesktopApplication",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Update desktop app
https://developer.centrify.com/reference#post_saasmanage-updateapplicationde
Request body format
{
"LocalizationEnabled": false,
"LocalizationMappings": [
...
],
"Name": "AirWatch ONE UEM",
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"IconUri": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppAccountContainerId": "",
"DesktopAppAccountUuid": "",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"RemoteDesktopUser": "shared_account (demo.lab)",
"DesktopAppProgramName": "pas_desktopapp",
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "2020-06-17T09:24:09.244Z",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
],
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
Respond result
{
"success": true,
"Result": {
"State": 0
},
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Delete desktop app
https://developer.centrify.com/reference#post_saasmanage-deleteapplication
Request body format
{
"_RowKey": [
"xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
*/
| {
return err
} | conditional_block |
centrify_desktopapp.go | package centrify
import (
"errors"
"github.com/centrify/terraform-provider/cloud-golang-sdk/restapi"
)
// DesktopApp - Encapsulates a single Generic DesktopApp
type DesktopApp struct {
vaultObject
TemplateName string `json:"TemplateName,omitempty" schema:"template_name,omitempty"`
DesktopAppRunHostID string `json:"DesktopAppRunHostId,omitempty" schema:"application_host_id,omitempty"` // Application host
DesktopAppRunAccountType string `json:"DesktopAppRunAccountType,omitempty" schema:"login_credential_type,omitempty"` // Host login credential type: ADCredential, SetByUser, AlternativeAccount, SharedAccount
DesktopAppRunAccountID string `json:"DesktopAppRunAccountUuid,omitempty" schema:"application_account_id,omitempty"` // Host login credential account
DesktopAppProgramName string `json:"DesktopAppProgramName,omitempty" schema:"application_alias,omitempty"` // Application alias
DesktopAppCmdline string `json:"DesktopAppCmdlineTemplate,omitempty" schema:"command_line,omitempty"` // Command line
DesktopAppParams []DesktopAppParam `json:"DesktopAppParams,omitempty" schema:"command_parameter,omitempty"`
DefaultAuthProfile string `json:"DefaultAuthProfile" schema:"default_profile_id"`
ChallengeRules *ChallengeRules `json:"AuthRules,omitempty" schema:"challenge_rule,omitempty"`
PolicyScript string `json:"PolicyScript,omitempty" schema:"policy_script,omitempty"` // Use script to specify authentication rules (configured rules are ignored)
WorkflowEnabled bool `json:"WorkflowEnabled,omitempty" schema:"workflow_enabled,omitempty"`
}
// DesktopAppParam - desktop app command line parameters
type DesktopAppParam struct {
ParamName string `json:"ParamName,omitempty" schema:"name,omitempty"`
ParamType string `json:"ParamType,omitempty" schema:"type,omitempty"` // int, date, string, User, Role, Device, Server, VaultAccount, VaultDomain, VaultDatabase, Subscriptions, DataVault, SshKeys, system_profile
ParamValue string `json:"ParamValue,omitempty" schema:"value,omitempty"`
TargetObjectID string `json:"TargetObjectId,omitempty" schema:"target_object_id,omitempty"`
}
// NewDesktopApp is a esktopApp constructor
func NewDesktopApp(c *restapi.RestClient) *DesktopApp |
// Read function fetches a DesktopApp from source, including attribute values. Returns error if any
func (o *DesktopApp) Read() error {
if o.ID == "" {
return errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = o.ID
// Attempt to read from an upstream API
resp, err := o.client.CallGenericMapAPI(o.apiRead, queryArg)
if err != nil {
return err
}
if !resp.Success {
return errors.New(resp.Message)
}
fillWithMap(o, resp.Result)
LogD.Printf("Filled object: %+v", o)
return nil
}
// Create function creates a new DesktopApp and returns a map that contains creation result
func (o *DesktopApp) Create() (*restapi.SliceResponse, error) {
var queryArg = make(map[string]interface{})
queryArg["ID"] = []string{o.TemplateName}
LogD.Printf("Generated Map for Create(): %+v", queryArg)
resp, err := o.client.CallSliceAPI(o.apiCreate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Update function updates an existing DesktopApp and returns a map that contains update result
func (o *DesktopApp) Update() (*restapi.GenericMapResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg, err := generateRequestMap(o)
if err != nil {
return nil, err
}
queryArg["_RowKey"] = o.ID
LogD.Printf("Generated Map for Update(): %+v", queryArg)
resp, err := o.client.CallGenericMapAPI(o.apiUpdate, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Delete function deletes a DesktopApp and returns a map that contains deletion result
func (o *DesktopApp) Delete() (*restapi.SliceResponse, error) {
if o.ID == "" {
return nil, errors.New("error: ID is empty")
}
var queryArg = make(map[string]interface{})
queryArg["_RowKey"] = []string{o.ID}
resp, err := o.client.CallSliceAPI(o.apiDelete, queryArg)
if err != nil {
return nil, err
}
if !resp.Success {
return nil, errors.New(resp.Message)
}
return resp, nil
}
// Query function returns a single DesktopApp object in map format
func (o *DesktopApp) Query() (map[string]interface{}, error) {
query := "SELECT * FROM Application WHERE 1=1 AND AppType='Desktop'"
if o.Name != "" {
query += " AND Name='" + o.Name + "'"
}
return queryVaultObject(o.client, query)
}
/*
Fetch desktop app
https://developer.centrify.com/reference#post_saasmanage-getapplication
Request body format
{
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RRFormat": true,
"Args": {
"PageNumber": 1,
"Limit": 1,
"PageSize": 1,
"Caching": -1
}
}
Respond result
{
"success": true,
"Result": {
"IsTestApp": false,
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DisplayName": "AirWatch ONE UEM",
"UseDefaultSigningCert": true,
"_entitycontext": "W/\"datetime'2020-06-17T09%3A24%3A09.4903113Z'\"",
"_TableName": "application",
"Generic": true,
"LocalizationMappings": [
...
],
"State": "Active",
"RegistrationLinkMessage": null,
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppProgramName": "pas_desktopapp",
"_encryptkeyid": "XXXXXXX",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"_PartitionKey": "XXXXXX",
"RemoteDesktopUser": "shared_account (demo.lab)",
"CertificateSubjectName": "CN=Centrify Customer Application Signing Certificate",
"ParentDisplayName": null,
"_metadata": {
"Version": 1,
"IndexingVersion": 1
},
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"DesktopAppType": "CommandLine",
"AuthRules": {
"_UniqueKey": "Condition",
"_Value": [],
"Enabled": true,
"_Type": "RowSet"
},
"AppType": "Desktop",
"Name": "AirWatch ONE UEM",
"Thumbprint": "XXXXXXXXXXXXXXXXX",
"TemplateName": "GenericDesktopApplication",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"DefaultAuthProfile": "AlwaysAllowed",
"AppTypeDisplayName": "Desktop",
"AuthChallengeDefinitionId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"RegistrationMessage": null,
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849490)/",
"ProvCapable": false,
"AdminTag": "Other",
"ProvSettingEnabled": false,
"ProvConfigured": false,
"ACL": "true",
"Category": "Other",
"LocalizationEnabled": false,
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "/Date(1592385849244)/",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
]
},
"IsSoftError": false
}
Create desktop app
https://developer.centrify.com/reference#post_saasmanage-importappfromtemplate
Request body format
{
"ID": [
"GenericDesktopApplication" |"Ssms"|"Toad"|"VpxClient"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"ID": "GenericDesktopApplication",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Update desktop app
https://developer.centrify.com/reference#post_saasmanage-updateapplicationde
Request body format
{
"LocalizationEnabled": false,
"LocalizationMappings": [
...
],
"Name": "AirWatch ONE UEM",
"Description": "This template allows you to provide single sign-on to a custom desktop application.",
"Icon": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"Handler": "Server.Cloud;Centrify.Server.DesktopApp.GenericDesktopAppHandler",
"IconUri": "/vfs/Application/Icons/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunHostId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppRunAccountUuid": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"DesktopAppAccountContainerId": "",
"DesktopAppAccountUuid": "",
"RemoteDesktopHostName": "member2",
"DesktopAppRunAccountType": "SharedAccount",
"RemoteDesktopUser": "shared_account (demo.lab)",
"DesktopAppProgramName": "pas_desktopapp",
"DesktopAppParams": [
{
"_encryptkeyid": "XXXXXXX",
"_TableName": "applicationparams",
"TargetObjectId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"_Timestamp": "2020-06-17T09:24:09.244Z",
"ParamName": "login",
"ApplicationId": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamValue": "AirWatch Workspace ONE UEM Login",
"_PartitionKey": "XXXXXX",
"_entitycontext": "*",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"ParamType": "DataVault",
"_metadata": {
"Version": 1,
"IndexingVersion": 1
}
}
],
"DesktopAppCmdlineTemplate": "--ini=ini\\web_airwatch_webdriver.ini --username={login.Description} --password={login.SecretText}",
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
Respond result
{
"success": true,
"Result": {
"State": 0
},
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
Delete desktop app
https://developer.centrify.com/reference#post_saasmanage-deleteapplication
Request body format
{
"_RowKey": [
"xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
]
}
Respond result
{
"success": true,
"Result": [
{
"success": true,
"_RowKey": "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx"
}
],
"Message": null,
"MessageID": null,
"Exception": null,
"ErrorID": null,
"ErrorCode": null,
"IsSoftError": false,
"InnerExceptions": null
}
*/
| {
s := DesktopApp{}
s.client = c
s.apiRead = "/SaasManage/GetApplication"
s.apiCreate = "/SaasManage/ImportAppFromTemplate"
s.apiDelete = "/SaasManage/DeleteApplication"
s.apiUpdate = "/SaasManage/UpdateApplicationDE"
s.apiPermissions = "/SaasManage/SetApplicationPermissions"
return &s
} | identifier_body |
call.component.ts | import { Component, OnInit, ViewChild, ElementRef, OnDestroy, Output, HostListener, AfterViewInit, Input } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import Swal from 'sweetalert2';
import { EventEmitter } from '@angular/core';
import { environment } from 'src/environments/environment';
import { MedicoService } from 'src/app/services/medico.service';
import { promise } from 'protractor';
declare var Peer;
declare var MultiStreamRecorder: any;
declare function ConcatenateBlobs(array:Array<any>,type:string,callback:Function)
@Component({
selector: 'app-call',
templateUrl: './call.component.html',
styleUrls: ['./call.component.scss']
})
export class CallComponent implements OnInit{
public peer:any;
public calling:boolean;
public key:string;
public myPeerId:string;
public inCall:boolean;
public mensajeTemp:string;
public recorderWithTimer;
public audioOfCall;
//Tramos de la llamada para al finalizarlos obtener el archivo
public callChunks;
@Output() public addImage:EventEmitter<string>;
public mensaje:string;
@Output() colgado:EventEmitter<any>;
@ViewChild('video',{static:true}) public video:ElementRef<HTMLVideoElement>;
@ViewChild('me',{static:false}) public me:ElementRef<HTMLVideoElement>;
@ViewChild('preview',{static:false}) public preview:ElementRef<HTMLAudioElement>;
@Output() newMessage:EventEmitter<string>;
@Output() cierreVentana: EventEmitter<void>;
@Output() markAsConnected:EventEmitter<string>;
@Output() finLlamadaPaciente:EventEmitter<void>;
conn: any;
llamada: any;
@Output() sendSMS: EventEmitter<void>;
@Output() inicioLlamada:EventEmitter<any>;
@Output() finLlamadaMedico:EventEmitter<any>;
@Input() consecutivo:string;
remoteStream: any;
localStream: any;
audioTracks: any[];
sources: any[];
dest: any;
recorder: any;
audio: any;
constructor(private _router:ActivatedRoute,private _route:Router,private __medico:MedicoService){
this.finLlamadaPaciente = new EventEmitter();
this.calling = false;
this.addImage = new EventEmitter<string>();
this.mensaje = '';
this.newMessage = new EventEmitter<string>();
this.inicioLlamada = new EventEmitter();
this.markAsConnected = new EventEmitter<string>();
this.colgado = new EventEmitter();
this.sendSMS = new EventEmitter();
this.finLlamadaMedico = new EventEmitter();
this.cierreVentana = new EventEmitter();
this.callChunks = [];
var browser = <any>navigator;
this.inCall = false;
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},(stream)=>{
this.me.nativeElement.srcObject = stream;
this.me.nativeElement.volume = 0;
this.localStream = stream;
this.me.nativeElement.play();
},()=>{
})
let documento= JSON.parse(localStorage.getItem('documento'));
if(documento){
this.peer = new Peer(documento+'rmv',{
host:environment.peerConf.ip,
path:environment.peerConf.path,
port:9000,
debug:0,
secure:environment.peerConf.secure
});
}else{
this.peer = new Peer(this.consecutivo,{
host:environment.peerConf.ip,
path:environment.peerConf.path,
secure:environment.peerConf.secure,
port:9000,
debug:0, | }
this.peer.on('connection',(conn)=>{
conn.on('data',async (data)=>{
if(data.action){
this.finLlamadaMedico.emit();
this.finLlamadaPaciente.emit();
this.inCall = false;
return;
}
if(data.cancelCall){
Swal.close({value:false});
}
if(data.reconnect){
this.conn = this.peer.connect();
this.videoConnect();
return;
}
if(data.message){
this.mensaje = this.mensaje.concat(`${data.message}\n`);
this.newMessage.emit(this.mensaje);
return;
}
if(data.key && !data.call){
this.key = data.key;
this.markAsConnected.emit(data.csc);
this.conn = this.peer.connect(data.key);
return;
}
if(data.call){
const audio = new Audio();
audio.src = "https://notificationsounds.com/notification-sounds/goes-without-saying-608/download/mp3";
audio.play();
audio.onended = ()=>{
if(!this.conn){
audio.currentTime = 0;
audio.play();
}
}
let hangout = await Swal.fire({
title:'Llamada entrante de Red Medica Vital',
showCancelButton:true,
allowOutsideClick:false
});
if(hangout.value){
this.inCall = true;
this.conn = this.peer.connect(data.key);
this.inicioLlamada.emit();
this.videoConnect();
}
if(hangout.dismiss){
this.conn.send({rechazo:true});
}
}
if(data.rechazo){
await Swal.fire({
title:'Rechazo',
text:'El beneficiario rechazo la llamada',
showConfirmButton:true
})
this.calling = false;
}
});
})
this.peer.on('call',async (call)=>{
this.inCall = true;
this.inicioLlamada.emit();
this.llamada = call;
this.llamada.on('close',()=>{
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
browser.getUserMedia({audio:true,video:true},
(stream)=>{
call.answer(stream);
this.remoteStream = stream;
call.on('stream',(remoteStream)=>{
this.video.nativeElement.srcObject = remoteStream;
this.remoteStream = remoteStream;
var playPromise =this.video.nativeElement.play();
this.audioTracks = [this.localStream,this.remoteStream];
this.recorder = new MultiStreamRecorder(this.audioTracks);
this.recorder.mimeType = "audio/webm";
this.recorder.ondataavailable = (blob)=>{
this.callChunks.push(blob);
}
this.recorder.start(5000);
this.recorderWithTimer = new MultiStreamRecorder(this.audioTracks);
this.recorderWithTimer.mimeType = "audio/webm";
this.recorderWithTimer.start(300 * 1000);
this.recorderWithTimer.ondataavailable = (blob)=>{
//Notify to server 5 minutos de llamada
}
});
},()=>{
})
});
}
ngOnInit(): void {
this.inCall = false;
this._router.params.subscribe(params=>{
if(params.medicId){
this.key = params.medicId;
this.connect();
}
})
}
notifyCall(data){
if(this.key === null || this.key === undefined){
Swal.fire({
timer:2000,
text:'No puedes llamar a alguien que no está en sala de espera',
showConfirmButton:false,
title:'error',
icon:'error'
});
return;
}
this.calling = true;
this.conn.send(data);
}
connect(){
this.conn = this.peer.connect(this.key)
this.conn.on('open',(data)=>{
this.conn.send({key:this.peer.id,csc:this.consecutivo});
})
}
async videoConnect(){
const video = this.video.nativeElement;
var browser = <any>navigator;
if(this.key !== null && this.key !== undefined ){
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},
(stream)=>{
this.localStream = stream;
this.inCall = true;
this.llamada = this.peer.call(this.key,stream);
this.llamada.on('stream',(remoteStream)=>{
this.remoteStream = remoteStream;
video.srcObject = remoteStream;
video.play();
});
this.llamada.on('close',()=>{
this.recorder.stop();
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
},()=>{
})
}else{
Swal.fire('El cliente aún no se encuentra conectado');
}
}
async colgar(){
this.conn.send({action:'close'})
this.recorder.stop();
this.llamada.close();
this.inCall = false;
this.video.nativeElement.pause();
}
@HostListener('window:beforeunload', ['$event'])
onWindowClose(event: BeforeUnloadEvent):void {
event.returnValue = true;
this.cierreVentana.emit();
this.colgar();
}
async confirm():Promise<boolean>{
let {dismiss} = await Swal.fire({
title:'Seguro desea salir?',
text:'Abandonará una llamada',
showCancelButton:true,
cancelButtonText:'Permanecer aquí',
confirmButtonText:'Seguro que quiero salir',
})
return !dismiss
}
enviarMensaje(){
this.mensaje = this.mensaje.concat(`${this.mensajeTemp}\n`);
this.conn.send({message:this.mensajeTemp});
this.newMessage.emit(this.mensaje);
this.mensajeTemp = '';
}
enviarSMS(){
this.sendSMS.emit();
}
imageCapture(){
const canvas = document.createElement('canvas');
canvas.width = this.video.nativeElement.videoWidth;
canvas.height = this.video.nativeElement.videoHeight;
canvas.getContext("2d").drawImage(this.video.nativeElement,0,0);
let imgUrl;
canvas.toBlob(async (blob)=>{
imgUrl = window.URL.createObjectURL(blob);
let {value,dismiss} = await Swal.fire({
imageUrl:window.URL.createObjectURL(blob),
title:'Desea guardar esta imagen en la lista de caputras de esta teleconsulta?',
showCancelButton:true
});
if(value){
this.addImage.emit(imgUrl);
}
});
}
reconect(){
this.conn.send({reconnect:true});
}
async cancelCall(){
this.calling = false;
this.conn.send({cancelCall:true});
}
finalizar(){
ConcatenateBlobs(this.callChunks, 'audio/webm', (resultingBlob) =>{
this.audio = resultingBlob;
this.colgado.emit();
});
}
getAudio(){
return new Promise((res,rej)=>{
ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
this.recorder.stop();
res(resultingBlob);
})
})
}
// get audioBlob(){
// ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
// this.audio = resultingBlob;
// // this.preview.nativeElement.src = URL.createObjectURL(resultingBlob);
// });
// return null;
// }
}
/*
const audioCtx = new AudioContext();
console.log(this.localStream);
const dest =audioCtx.createMediaStreamDestination();
let localsource = audioCtx.createMediaStreamSource(this.localStream);
let remoteSource = audioCtx.createMediaStreamSource(this.remoteStream);
localsource.connect(dest);
remoteSource.connect(dest);
console.log(dest.stream.getTracks()[0])
*/ | // secure:true
}); | random_line_split |
call.component.ts | import { Component, OnInit, ViewChild, ElementRef, OnDestroy, Output, HostListener, AfterViewInit, Input } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import Swal from 'sweetalert2';
import { EventEmitter } from '@angular/core';
import { environment } from 'src/environments/environment';
import { MedicoService } from 'src/app/services/medico.service';
import { promise } from 'protractor';
declare var Peer;
declare var MultiStreamRecorder: any;
declare function ConcatenateBlobs(array:Array<any>,type:string,callback:Function)
@Component({
selector: 'app-call',
templateUrl: './call.component.html',
styleUrls: ['./call.component.scss']
})
export class CallComponent implements OnInit{
public peer:any;
public calling:boolean;
public key:string;
public myPeerId:string;
public inCall:boolean;
public mensajeTemp:string;
public recorderWithTimer;
public audioOfCall;
//Tramos de la llamada para al finalizarlos obtener el archivo
public callChunks;
@Output() public addImage:EventEmitter<string>;
public mensaje:string;
@Output() colgado:EventEmitter<any>;
@ViewChild('video',{static:true}) public video:ElementRef<HTMLVideoElement>;
@ViewChild('me',{static:false}) public me:ElementRef<HTMLVideoElement>;
@ViewChild('preview',{static:false}) public preview:ElementRef<HTMLAudioElement>;
@Output() newMessage:EventEmitter<string>;
@Output() cierreVentana: EventEmitter<void>;
@Output() markAsConnected:EventEmitter<string>;
@Output() finLlamadaPaciente:EventEmitter<void>;
conn: any;
llamada: any;
@Output() sendSMS: EventEmitter<void>;
@Output() inicioLlamada:EventEmitter<any>;
@Output() finLlamadaMedico:EventEmitter<any>;
@Input() consecutivo:string;
remoteStream: any;
localStream: any;
audioTracks: any[];
sources: any[];
dest: any;
recorder: any;
audio: any;
constructor(private _router:ActivatedRoute,private _route:Router,private __medico:MedicoService){
this.finLlamadaPaciente = new EventEmitter();
this.calling = false;
this.addImage = new EventEmitter<string>();
this.mensaje = '';
this.newMessage = new EventEmitter<string>();
this.inicioLlamada = new EventEmitter();
this.markAsConnected = new EventEmitter<string>();
this.colgado = new EventEmitter();
this.sendSMS = new EventEmitter();
this.finLlamadaMedico = new EventEmitter();
this.cierreVentana = new EventEmitter();
this.callChunks = [];
var browser = <any>navigator;
this.inCall = false;
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},(stream)=>{
this.me.nativeElement.srcObject = stream;
this.me.nativeElement.volume = 0;
this.localStream = stream;
this.me.nativeElement.play();
},()=>{
})
let documento= JSON.parse(localStorage.getItem('documento'));
if(documento){
this.peer = new Peer(documento+'rmv',{
host:environment.peerConf.ip,
path:environment.peerConf.path,
port:9000,
debug:0,
secure:environment.peerConf.secure
});
}else{
this.peer = new Peer(this.consecutivo,{
host:environment.peerConf.ip,
path:environment.peerConf.path,
secure:environment.peerConf.secure,
port:9000,
debug:0,
// secure:true
});
}
this.peer.on('connection',(conn)=>{
conn.on('data',async (data)=>{
if(data.action){
this.finLlamadaMedico.emit();
this.finLlamadaPaciente.emit();
this.inCall = false;
return;
}
if(data.cancelCall){
Swal.close({value:false});
}
if(data.reconnect){
this.conn = this.peer.connect();
this.videoConnect();
return;
}
if(data.message){
this.mensaje = this.mensaje.concat(`${data.message}\n`);
this.newMessage.emit(this.mensaje);
return;
}
if(data.key && !data.call){
this.key = data.key;
this.markAsConnected.emit(data.csc);
this.conn = this.peer.connect(data.key);
return;
}
if(data.call){
const audio = new Audio();
audio.src = "https://notificationsounds.com/notification-sounds/goes-without-saying-608/download/mp3";
audio.play();
audio.onended = ()=>{
if(!this.conn){
audio.currentTime = 0;
audio.play();
}
}
let hangout = await Swal.fire({
title:'Llamada entrante de Red Medica Vital',
showCancelButton:true,
allowOutsideClick:false
});
if(hangout.value){
this.inCall = true;
this.conn = this.peer.connect(data.key);
this.inicioLlamada.emit();
this.videoConnect();
}
if(hangout.dismiss){
this.conn.send({rechazo:true});
}
}
if(data.rechazo){
await Swal.fire({
title:'Rechazo',
text:'El beneficiario rechazo la llamada',
showConfirmButton:true
})
this.calling = false;
}
});
})
this.peer.on('call',async (call)=>{
this.inCall = true;
this.inicioLlamada.emit();
this.llamada = call;
this.llamada.on('close',()=>{
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
browser.getUserMedia({audio:true,video:true},
(stream)=>{
call.answer(stream);
this.remoteStream = stream;
call.on('stream',(remoteStream)=>{
this.video.nativeElement.srcObject = remoteStream;
this.remoteStream = remoteStream;
var playPromise =this.video.nativeElement.play();
this.audioTracks = [this.localStream,this.remoteStream];
this.recorder = new MultiStreamRecorder(this.audioTracks);
this.recorder.mimeType = "audio/webm";
this.recorder.ondataavailable = (blob)=>{
this.callChunks.push(blob);
}
this.recorder.start(5000);
this.recorderWithTimer = new MultiStreamRecorder(this.audioTracks);
this.recorderWithTimer.mimeType = "audio/webm";
this.recorderWithTimer.start(300 * 1000);
this.recorderWithTimer.ondataavailable = (blob)=>{
//Notify to server 5 minutos de llamada
}
});
},()=>{
})
});
}
ngOnInit(): void {
this.inCall = false;
this._router.params.subscribe(params=>{
if(params.medicId){
this.key = params.medicId;
this.connect();
}
})
}
notifyCall(data){
if(this.key === null || this.key === undefined){
Swal.fire({
timer:2000,
text:'No puedes llamar a alguien que no está en sala de espera',
showConfirmButton:false,
title:'error',
icon:'error'
});
return;
}
this.calling = true;
this.conn.send(data);
}
connect(){
this.conn = this.peer.connect(this.key)
this.conn.on('open',(data)=>{
this.conn.send({key:this.peer.id,csc:this.consecutivo});
})
}
async videoConnect(){
const video = this.video.nativeElement;
var browser = <any>navigator;
if(this.key !== null && this.key !== undefined ){
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},
(stream)=>{
this.localStream = stream;
this.inCall = true;
this.llamada = this.peer.call(this.key,stream);
this.llamada.on('stream',(remoteStream)=>{
this.remoteStream = remoteStream;
video.srcObject = remoteStream;
video.play();
});
this.llamada.on('close',()=>{
this.recorder.stop();
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
},()=>{
})
}else{
Swal.fire('El cliente aún no se encuentra conectado');
}
}
async colgar(){
this.conn.send({action:'close'})
this.recorder.stop();
this.llamada.close();
this.inCall = false;
this.video.nativeElement.pause();
}
@HostListener('window:beforeunload', ['$event'])
onWindowClose(event: BeforeUnloadEvent):void {
| async confirm():Promise<boolean>{
let {dismiss} = await Swal.fire({
title:'Seguro desea salir?',
text:'Abandonará una llamada',
showCancelButton:true,
cancelButtonText:'Permanecer aquí',
confirmButtonText:'Seguro que quiero salir',
})
return !dismiss
}
enviarMensaje(){
this.mensaje = this.mensaje.concat(`${this.mensajeTemp}\n`);
this.conn.send({message:this.mensajeTemp});
this.newMessage.emit(this.mensaje);
this.mensajeTemp = '';
}
enviarSMS(){
this.sendSMS.emit();
}
imageCapture(){
const canvas = document.createElement('canvas');
canvas.width = this.video.nativeElement.videoWidth;
canvas.height = this.video.nativeElement.videoHeight;
canvas.getContext("2d").drawImage(this.video.nativeElement,0,0);
let imgUrl;
canvas.toBlob(async (blob)=>{
imgUrl = window.URL.createObjectURL(blob);
let {value,dismiss} = await Swal.fire({
imageUrl:window.URL.createObjectURL(blob),
title:'Desea guardar esta imagen en la lista de caputras de esta teleconsulta?',
showCancelButton:true
});
if(value){
this.addImage.emit(imgUrl);
}
});
}
reconect(){
this.conn.send({reconnect:true});
}
async cancelCall(){
this.calling = false;
this.conn.send({cancelCall:true});
}
finalizar(){
ConcatenateBlobs(this.callChunks, 'audio/webm', (resultingBlob) =>{
this.audio = resultingBlob;
this.colgado.emit();
});
}
getAudio(){
return new Promise((res,rej)=>{
ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
this.recorder.stop();
res(resultingBlob);
})
})
}
// get audioBlob(){
// ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
// this.audio = resultingBlob;
// // this.preview.nativeElement.src = URL.createObjectURL(resultingBlob);
// });
// return null;
// }
}
/*
const audioCtx = new AudioContext();
console.log(this.localStream);
const dest =audioCtx.createMediaStreamDestination();
let localsource = audioCtx.createMediaStreamSource(this.localStream);
let remoteSource = audioCtx.createMediaStreamSource(this.remoteStream);
localsource.connect(dest);
remoteSource.connect(dest);
console.log(dest.stream.getTracks()[0])
*/ | event.returnValue = true;
this.cierreVentana.emit();
this.colgar();
}
| identifier_body |
call.component.ts | import { Component, OnInit, ViewChild, ElementRef, OnDestroy, Output, HostListener, AfterViewInit, Input } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import Swal from 'sweetalert2';
import { EventEmitter } from '@angular/core';
import { environment } from 'src/environments/environment';
import { MedicoService } from 'src/app/services/medico.service';
import { promise } from 'protractor';
declare var Peer;
declare var MultiStreamRecorder: any;
declare function ConcatenateBlobs(array:Array<any>,type:string,callback:Function)
@Component({
selector: 'app-call',
templateUrl: './call.component.html',
styleUrls: ['./call.component.scss']
})
export class CallComponent implements OnInit{
public peer:any;
public calling:boolean;
public key:string;
public myPeerId:string;
public inCall:boolean;
public mensajeTemp:string;
public recorderWithTimer;
public audioOfCall;
//Tramos de la llamada para al finalizarlos obtener el archivo
public callChunks;
@Output() public addImage:EventEmitter<string>;
public mensaje:string;
@Output() colgado:EventEmitter<any>;
@ViewChild('video',{static:true}) public video:ElementRef<HTMLVideoElement>;
@ViewChild('me',{static:false}) public me:ElementRef<HTMLVideoElement>;
@ViewChild('preview',{static:false}) public preview:ElementRef<HTMLAudioElement>;
@Output() newMessage:EventEmitter<string>;
@Output() cierreVentana: EventEmitter<void>;
@Output() markAsConnected:EventEmitter<string>;
@Output() finLlamadaPaciente:EventEmitter<void>;
conn: any;
llamada: any;
@Output() sendSMS: EventEmitter<void>;
@Output() inicioLlamada:EventEmitter<any>;
@Output() finLlamadaMedico:EventEmitter<any>;
@Input() consecutivo:string;
remoteStream: any;
localStream: any;
audioTracks: any[];
sources: any[];
dest: any;
recorder: any;
audio: any;
constructor(private _router:ActivatedRoute,private _route:Router,private __medico:MedicoService){
this.finLlamadaPaciente = new EventEmitter();
this.calling = false;
this.addImage = new EventEmitter<string>();
this.mensaje = '';
this.newMessage = new EventEmitter<string>();
this.inicioLlamada = new EventEmitter();
this.markAsConnected = new EventEmitter<string>();
this.colgado = new EventEmitter();
this.sendSMS = new EventEmitter();
this.finLlamadaMedico = new EventEmitter();
this.cierreVentana = new EventEmitter();
this.callChunks = [];
var browser = <any>navigator;
this.inCall = false;
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},(stream)=>{
this.me.nativeElement.srcObject = stream;
this.me.nativeElement.volume = 0;
this.localStream = stream;
this.me.nativeElement.play();
},()=>{
})
let documento= JSON.parse(localStorage.getItem('documento'));
if(documento){
this.peer = new Peer(documento+'rmv',{
host:environment.peerConf.ip,
path:environment.peerConf.path,
port:9000,
debug:0,
secure:environment.peerConf.secure
});
}else{
this.peer = new Peer(this.consecutivo,{
host:environment.peerConf.ip,
path:environment.peerConf.path,
secure:environment.peerConf.secure,
port:9000,
debug:0,
// secure:true
});
}
this.peer.on('connection',(conn)=>{
conn.on('data',async (data)=>{
if(data.action){
this.finLlamadaMedico.emit();
this.finLlamadaPaciente.emit();
this.inCall = false;
return;
}
if(data.cancelCall){
Swal.close({value:false});
}
if(data.reconnect){
this.conn = this.peer.connect();
this.videoConnect();
return;
}
if(data.message){
this.mensaje = this.mensaje.concat(`${data.message}\n`);
this.newMessage.emit(this.mensaje);
return;
}
if(data.key && !data.call) |
if(data.call){
const audio = new Audio();
audio.src = "https://notificationsounds.com/notification-sounds/goes-without-saying-608/download/mp3";
audio.play();
audio.onended = ()=>{
if(!this.conn){
audio.currentTime = 0;
audio.play();
}
}
let hangout = await Swal.fire({
title:'Llamada entrante de Red Medica Vital',
showCancelButton:true,
allowOutsideClick:false
});
if(hangout.value){
this.inCall = true;
this.conn = this.peer.connect(data.key);
this.inicioLlamada.emit();
this.videoConnect();
}
if(hangout.dismiss){
this.conn.send({rechazo:true});
}
}
if(data.rechazo){
await Swal.fire({
title:'Rechazo',
text:'El beneficiario rechazo la llamada',
showConfirmButton:true
})
this.calling = false;
}
});
})
this.peer.on('call',async (call)=>{
this.inCall = true;
this.inicioLlamada.emit();
this.llamada = call;
this.llamada.on('close',()=>{
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
browser.getUserMedia({audio:true,video:true},
(stream)=>{
call.answer(stream);
this.remoteStream = stream;
call.on('stream',(remoteStream)=>{
this.video.nativeElement.srcObject = remoteStream;
this.remoteStream = remoteStream;
var playPromise =this.video.nativeElement.play();
this.audioTracks = [this.localStream,this.remoteStream];
this.recorder = new MultiStreamRecorder(this.audioTracks);
this.recorder.mimeType = "audio/webm";
this.recorder.ondataavailable = (blob)=>{
this.callChunks.push(blob);
}
this.recorder.start(5000);
this.recorderWithTimer = new MultiStreamRecorder(this.audioTracks);
this.recorderWithTimer.mimeType = "audio/webm";
this.recorderWithTimer.start(300 * 1000);
this.recorderWithTimer.ondataavailable = (blob)=>{
//Notify to server 5 minutos de llamada
}
});
},()=>{
})
});
}
ngOnInit(): void {
this.inCall = false;
this._router.params.subscribe(params=>{
if(params.medicId){
this.key = params.medicId;
this.connect();
}
})
}
notifyCall(data){
if(this.key === null || this.key === undefined){
Swal.fire({
timer:2000,
text:'No puedes llamar a alguien que no está en sala de espera',
showConfirmButton:false,
title:'error',
icon:'error'
});
return;
}
this.calling = true;
this.conn.send(data);
}
connect(){
this.conn = this.peer.connect(this.key)
this.conn.on('open',(data)=>{
this.conn.send({key:this.peer.id,csc:this.consecutivo});
})
}
async videoConnect(){
const video = this.video.nativeElement;
var browser = <any>navigator;
if(this.key !== null && this.key !== undefined ){
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},
(stream)=>{
this.localStream = stream;
this.inCall = true;
this.llamada = this.peer.call(this.key,stream);
this.llamada.on('stream',(remoteStream)=>{
this.remoteStream = remoteStream;
video.srcObject = remoteStream;
video.play();
});
this.llamada.on('close',()=>{
this.recorder.stop();
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
},()=>{
})
}else{
Swal.fire('El cliente aún no se encuentra conectado');
}
}
async colgar(){
this.conn.send({action:'close'})
this.recorder.stop();
this.llamada.close();
this.inCall = false;
this.video.nativeElement.pause();
}
@HostListener('window:beforeunload', ['$event'])
onWindowClose(event: BeforeUnloadEvent):void {
event.returnValue = true;
this.cierreVentana.emit();
this.colgar();
}
async confirm():Promise<boolean>{
let {dismiss} = await Swal.fire({
title:'Seguro desea salir?',
text:'Abandonará una llamada',
showCancelButton:true,
cancelButtonText:'Permanecer aquí',
confirmButtonText:'Seguro que quiero salir',
})
return !dismiss
}
enviarMensaje(){
this.mensaje = this.mensaje.concat(`${this.mensajeTemp}\n`);
this.conn.send({message:this.mensajeTemp});
this.newMessage.emit(this.mensaje);
this.mensajeTemp = '';
}
enviarSMS(){
this.sendSMS.emit();
}
imageCapture(){
const canvas = document.createElement('canvas');
canvas.width = this.video.nativeElement.videoWidth;
canvas.height = this.video.nativeElement.videoHeight;
canvas.getContext("2d").drawImage(this.video.nativeElement,0,0);
let imgUrl;
canvas.toBlob(async (blob)=>{
imgUrl = window.URL.createObjectURL(blob);
let {value,dismiss} = await Swal.fire({
imageUrl:window.URL.createObjectURL(blob),
title:'Desea guardar esta imagen en la lista de caputras de esta teleconsulta?',
showCancelButton:true
});
if(value){
this.addImage.emit(imgUrl);
}
});
}
reconect(){
this.conn.send({reconnect:true});
}
async cancelCall(){
this.calling = false;
this.conn.send({cancelCall:true});
}
finalizar(){
ConcatenateBlobs(this.callChunks, 'audio/webm', (resultingBlob) =>{
this.audio = resultingBlob;
this.colgado.emit();
});
}
getAudio(){
return new Promise((res,rej)=>{
ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
this.recorder.stop();
res(resultingBlob);
})
})
}
// get audioBlob(){
// ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
// this.audio = resultingBlob;
// // this.preview.nativeElement.src = URL.createObjectURL(resultingBlob);
// });
// return null;
// }
}
/*
const audioCtx = new AudioContext();
console.log(this.localStream);
const dest =audioCtx.createMediaStreamDestination();
let localsource = audioCtx.createMediaStreamSource(this.localStream);
let remoteSource = audioCtx.createMediaStreamSource(this.remoteStream);
localsource.connect(dest);
remoteSource.connect(dest);
console.log(dest.stream.getTracks()[0])
*/ | {
this.key = data.key;
this.markAsConnected.emit(data.csc);
this.conn = this.peer.connect(data.key);
return;
} | conditional_block |
call.component.ts | import { Component, OnInit, ViewChild, ElementRef, OnDestroy, Output, HostListener, AfterViewInit, Input } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import Swal from 'sweetalert2';
import { EventEmitter } from '@angular/core';
import { environment } from 'src/environments/environment';
import { MedicoService } from 'src/app/services/medico.service';
import { promise } from 'protractor';
declare var Peer;
declare var MultiStreamRecorder: any;
declare function ConcatenateBlobs(array:Array<any>,type:string,callback:Function)
@Component({
selector: 'app-call',
templateUrl: './call.component.html',
styleUrls: ['./call.component.scss']
})
export class CallComponent implements OnInit{
public peer:any;
public calling:boolean;
public key:string;
public myPeerId:string;
public inCall:boolean;
public mensajeTemp:string;
public recorderWithTimer;
public audioOfCall;
//Tramos de la llamada para al finalizarlos obtener el archivo
public callChunks;
@Output() public addImage:EventEmitter<string>;
public mensaje:string;
@Output() colgado:EventEmitter<any>;
@ViewChild('video',{static:true}) public video:ElementRef<HTMLVideoElement>;
@ViewChild('me',{static:false}) public me:ElementRef<HTMLVideoElement>;
@ViewChild('preview',{static:false}) public preview:ElementRef<HTMLAudioElement>;
@Output() newMessage:EventEmitter<string>;
@Output() cierreVentana: EventEmitter<void>;
@Output() markAsConnected:EventEmitter<string>;
@Output() finLlamadaPaciente:EventEmitter<void>;
conn: any;
llamada: any;
@Output() sendSMS: EventEmitter<void>;
@Output() inicioLlamada:EventEmitter<any>;
@Output() finLlamadaMedico:EventEmitter<any>;
@Input() consecutivo:string;
remoteStream: any;
localStream: any;
audioTracks: any[];
sources: any[];
dest: any;
recorder: any;
audio: any;
constructor(private _router:ActivatedRoute,private _route:Router,private __medico:MedicoService){
this.finLlamadaPaciente = new EventEmitter();
this.calling = false;
this.addImage = new EventEmitter<string>();
this.mensaje = '';
this.newMessage = new EventEmitter<string>();
this.inicioLlamada = new EventEmitter();
this.markAsConnected = new EventEmitter<string>();
this.colgado = new EventEmitter();
this.sendSMS = new EventEmitter();
this.finLlamadaMedico = new EventEmitter();
this.cierreVentana = new EventEmitter();
this.callChunks = [];
var browser = <any>navigator;
this.inCall = false;
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},(stream)=>{
this.me.nativeElement.srcObject = stream;
this.me.nativeElement.volume = 0;
this.localStream = stream;
this.me.nativeElement.play();
},()=>{
})
let documento= JSON.parse(localStorage.getItem('documento'));
if(documento){
this.peer = new Peer(documento+'rmv',{
host:environment.peerConf.ip,
path:environment.peerConf.path,
port:9000,
debug:0,
secure:environment.peerConf.secure
});
}else{
this.peer = new Peer(this.consecutivo,{
host:environment.peerConf.ip,
path:environment.peerConf.path,
secure:environment.peerConf.secure,
port:9000,
debug:0,
// secure:true
});
}
this.peer.on('connection',(conn)=>{
conn.on('data',async (data)=>{
if(data.action){
this.finLlamadaMedico.emit();
this.finLlamadaPaciente.emit();
this.inCall = false;
return;
}
if(data.cancelCall){
Swal.close({value:false});
}
if(data.reconnect){
this.conn = this.peer.connect();
this.videoConnect();
return;
}
if(data.message){
this.mensaje = this.mensaje.concat(`${data.message}\n`);
this.newMessage.emit(this.mensaje);
return;
}
if(data.key && !data.call){
this.key = data.key;
this.markAsConnected.emit(data.csc);
this.conn = this.peer.connect(data.key);
return;
}
if(data.call){
const audio = new Audio();
audio.src = "https://notificationsounds.com/notification-sounds/goes-without-saying-608/download/mp3";
audio.play();
audio.onended = ()=>{
if(!this.conn){
audio.currentTime = 0;
audio.play();
}
}
let hangout = await Swal.fire({
title:'Llamada entrante de Red Medica Vital',
showCancelButton:true,
allowOutsideClick:false
});
if(hangout.value){
this.inCall = true;
this.conn = this.peer.connect(data.key);
this.inicioLlamada.emit();
this.videoConnect();
}
if(hangout.dismiss){
this.conn.send({rechazo:true});
}
}
if(data.rechazo){
await Swal.fire({
title:'Rechazo',
text:'El beneficiario rechazo la llamada',
showConfirmButton:true
})
this.calling = false;
}
});
})
this.peer.on('call',async (call)=>{
this.inCall = true;
this.inicioLlamada.emit();
this.llamada = call;
this.llamada.on('close',()=>{
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
browser.getUserMedia({audio:true,video:true},
(stream)=>{
call.answer(stream);
this.remoteStream = stream;
call.on('stream',(remoteStream)=>{
this.video.nativeElement.srcObject = remoteStream;
this.remoteStream = remoteStream;
var playPromise =this.video.nativeElement.play();
this.audioTracks = [this.localStream,this.remoteStream];
this.recorder = new MultiStreamRecorder(this.audioTracks);
this.recorder.mimeType = "audio/webm";
this.recorder.ondataavailable = (blob)=>{
this.callChunks.push(blob);
}
this.recorder.start(5000);
this.recorderWithTimer = new MultiStreamRecorder(this.audioTracks);
this.recorderWithTimer.mimeType = "audio/webm";
this.recorderWithTimer.start(300 * 1000);
this.recorderWithTimer.ondataavailable = (blob)=>{
//Notify to server 5 minutos de llamada
}
});
},()=>{
})
});
}
ngOnInit(): void {
this.inCall = false;
this._router.params.subscribe(params=>{
if(params.medicId){
this.key = params.medicId;
this.connect();
}
})
}
notifyCall(data){
if(this.key === null || this.key === undefined){
Swal.fire({
timer:2000,
text:'No puedes llamar a alguien que no está en sala de espera',
showConfirmButton:false,
title:'error',
icon:'error'
});
return;
}
this.calling = true;
this.conn.send(data);
}
connect(){
this.conn = this.peer.connect(this.key)
this.conn.on('open',(data)=>{
this.conn.send({key:this.peer.id,csc:this.consecutivo});
})
}
async videoConnect(){
const video = this.video.nativeElement;
var browser = <any>navigator;
if(this.key !== null && this.key !== undefined ){
browser.getUserMedia = (browser.getUserMedia ||
browser.webkitGetUserMedia ||
browser.mozGetUserMedia ||
browser.msGetUserMedia);
browser.getUserMedia({audio:true,video:true},
(stream)=>{
this.localStream = stream;
this.inCall = true;
this.llamada = this.peer.call(this.key,stream);
this.llamada.on('stream',(remoteStream)=>{
this.remoteStream = remoteStream;
video.srcObject = remoteStream;
video.play();
});
this.llamada.on('close',()=>{
this.recorder.stop();
this.inCall = false;
this.video.nativeElement.srcObject = null;
this.video.nativeElement.pause();
})
},()=>{
})
}else{
Swal.fire('El cliente aún no se encuentra conectado');
}
}
async colgar(){
this.conn.send({action:'close'})
this.recorder.stop();
this.llamada.close();
this.inCall = false;
this.video.nativeElement.pause();
}
@HostListener('window:beforeunload', ['$event'])
onWindowClose(event: BeforeUnloadEvent):void {
event.returnValue = true;
this.cierreVentana.emit();
this.colgar();
}
async confirm():Promise<boolean>{
let {dismiss} = await Swal.fire({
title:'Seguro desea salir?',
text:'Abandonará una llamada',
showCancelButton:true,
cancelButtonText:'Permanecer aquí',
confirmButtonText:'Seguro que quiero salir',
})
return !dismiss
}
enviarMensaje(){
this.mensaje = this.mensaje.concat(`${this.mensajeTemp}\n`);
this.conn.send({message:this.mensajeTemp});
this.newMessage.emit(this.mensaje);
this.mensajeTemp = '';
}
enviarSMS(){
this.sendSMS.emit();
}
imag | const canvas = document.createElement('canvas');
canvas.width = this.video.nativeElement.videoWidth;
canvas.height = this.video.nativeElement.videoHeight;
canvas.getContext("2d").drawImage(this.video.nativeElement,0,0);
let imgUrl;
canvas.toBlob(async (blob)=>{
imgUrl = window.URL.createObjectURL(blob);
let {value,dismiss} = await Swal.fire({
imageUrl:window.URL.createObjectURL(blob),
title:'Desea guardar esta imagen en la lista de caputras de esta teleconsulta?',
showCancelButton:true
});
if(value){
this.addImage.emit(imgUrl);
}
});
}
reconect(){
this.conn.send({reconnect:true});
}
async cancelCall(){
this.calling = false;
this.conn.send({cancelCall:true});
}
finalizar(){
ConcatenateBlobs(this.callChunks, 'audio/webm', (resultingBlob) =>{
this.audio = resultingBlob;
this.colgado.emit();
});
}
getAudio(){
return new Promise((res,rej)=>{
ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
this.recorder.stop();
res(resultingBlob);
})
})
}
// get audioBlob(){
// ConcatenateBlobs(this.callChunks, 'audio/webm', async (resultingBlob) =>{
// this.audio = resultingBlob;
// // this.preview.nativeElement.src = URL.createObjectURL(resultingBlob);
// });
// return null;
// }
}
/*
const audioCtx = new AudioContext();
console.log(this.localStream);
const dest =audioCtx.createMediaStreamDestination();
let localsource = audioCtx.createMediaStreamSource(this.localStream);
let remoteSource = audioCtx.createMediaStreamSource(this.remoteStream);
localsource.connect(dest);
remoteSource.connect(dest);
console.log(dest.stream.getTracks()[0])
*/ | eCapture(){
| identifier_name |
model.py | #!/usr/bin/env python
from __future__ import division
import sys
import math
logs = sys.stderr
from collections import defaultdict
import time
from mytime import Mytime
import gflags as flags
FLAGS=flags.FLAGS
flags.DEFINE_string("weights", None, "weights file (feature instances and weights)", short_name="w")
flags.DEFINE_boolean("svector", False, "use David's svector (Cython) instead of Pythonic defaultdict")
flags.DEFINE_boolean("featstat", False, "print feature stats")
flags.DEFINE_string("outputweights", None, "write weights (in short-hand format); - for STDOUT", short_name="ow")
flags.DEFINE_boolean("autoeval", True, "use automatically generated eval module")
flags.DEFINE_integer("unk", 0, "treat words with count less than COUNT as UNKNOWN")
flags.DEFINE_boolean("debug_wordfreq", False, "print word freq info")
flags.DEFINE_boolean("unktag", False, "use POS tags for unknown words")
flags.DEFINE_boolean("unkdel", False, "remove features involving unks")
flags.DEFINE_boolean("s2", True, "use s2t features")
def new_vector():
return defaultdict(int) if not FLAGS.svector else svector.Vector() # do not use lambda
class Model(object):
'''templates and weights.'''
## __slots__ = "templates", "weights", "list_templates", "freq_templates"
names = ["SHIFT", "LEFT", "RIGHT"]
indent = " " * 4
eval_module = None # by default, use my handwritten static_eval()
def __init__(self, weightstr):
|
def count_knowns_from_train(self, trainfile, devfile):
'''used in training'''
print >> logs, "counting word freqs from %s, unktag=%s" % (trainfile, self.unktag)
stime = time.time()
words = defaultdict(int)
for i, line in enumerate(open(trainfile)):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
words[word] += 1
if FLAGS.debug_wordfreq:
devunk1 = set()
devunk0 = set()
for line in open(devfile):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
if words[word] <= self.unk and words[word] > 0:
devunk1.add(word)
if words[word] == 0:
devunk0.add(word)
print >> logs, "=1", len(devunk1), " ".join(sorted(devunk1))
print >> logs
print >> logs, "=0", len(devunk0), " ".join(sorted(devunk0))
## freqs = defaultdict(list)
## for word, freq in words.items():
## freqs[freq].append(word)
## for freq in sorted(freqs, reverse=True):
## print >> logs, freq, len(freqs[freq]), " ".join(sorted(freqs[freq]))
## print >> logs
self.knowns = set()
for word, freq in words.items():
if freq > self.unk:
self.knowns.add(word)
print >> logs, "%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds" % \
(i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)
## print >> logs, " ".join(sorted(self.knowns))
def add_template(self, s, freq=1):
## like this: "s0w-s0t=%s|%s" % (s0w, s0t)
symbols = s.split("-") # static part: s0w-s0t
if s not in self.templates:
tmp = '"%s=%s" %% (%s)' % (s, \
"|".join(["%s"] * len(symbols)), \
", ".join(symbols))
self.templates[s] = compile(tmp, "2", "eval")
self.list_templates.append((s, tmp)) # in order
self.freq_templates[s] += int(freq)
def print_autoevals(self):
tfilename = str(int(time.time()))
templatefile = open("/tmp/%s.py" % tfilename, "wt")
print >> templatefile, "#generated by model.py"
print >> templatefile, "import sys; print >> sys.stderr, 'importing succeeded!'"
print >> templatefile, "def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):"
print >> templatefile, "%sreturn [" % Model.indent
for s, e in self.list_templates:
print >> templatefile, "%s%s," % (Model.indent * 2, e)
print >> templatefile, "%s]" % (Model.indent * 2)
templatefile.close()
if FLAGS.autoeval:
sys.path.append('/tmp/')
print >> logs, "importing auto-generated file /tmp/%s.py" % tfilename
# to be used in newstate
Model.eval_module = __import__(tfilename)
else:
Model.eval_module = Model
def print_templates(self, f=logs):
print >> f, ">>> %d templates in total:" % len(self.templates)
print >> f, "\n".join(["%-20s\t%d" % (x, self.freq_templates[x]) \
for x, _ in self.list_templates])
print >> f, "---"
def read_templates(self, filename):
## try interpreting it as a filename, if failed, then as a string
try:
f = open(filename)
print >> logs, "reading templates from %s" % filename,
for x in f:
if x[:3] == "---":
break
if x[:3] == ">>>":
continue
try:
s, freq = x.split()
except:
s, freq = x, 1
self.add_template(s, freq)
except:
## from argv string rather than file
for x in filename.split():
self.add_template(x)
f = None
print >> logs, "%d feature templates read." % len(self.templates)
return f
def read_weights(self, filename, infertemplates=False):
'''instances are like "s0t-q0t=LRB-</s>=>LEFT 3.8234"'''
infile = self.read_templates(filename)
infertemplates = len(self.templates) <= 1
if infertemplates:
print >> logs, "will infer templates from weights..."
mytime = Mytime()
i = 0
if infile is not None:
print >> logs, "reading feature weights from %s\t" % filename,
for i, line in enumerate(infile, 1):
if i % 200000 == 0:
print >> logs, "%d lines read..." % i,
if line[0] == " ":
# TODO: separate known words line (last line)
self.knowns = set(line.split())
print >> logs, "\n%d known words read." % len(self.knowns)
self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x
break
feat, weight = line.split()
self.weights[feat] = float(weight)
if infertemplates:
self.add_template(feat.split("=", 1)[0], 1) ## one occurrence
print >> logs, "\n%d feature instances (%d lines) read in %.2lf seconds." % \
(len(self.weights), i, mytime.period())
self.print_autoevals()
def make_feats(self, state):
'''returns a *list* of feature templates for state.'''
fv = new_vector() #Vector()
top = state.top()
topnext = state.top(1)
top3rd = state.top(2)
qhead = state.qhead()
qnext = state.qhead(1)
## this part is manual; their combinations are automatic
s0 = top.head() if top is not None else ("<s>", "<s>") # N.B. (...)
s1 = topnext.head() if topnext is not None else ("<s>", "<s>")
s2 = top3rd.head() if top3rd is not None else ("<s>", "<s>")
q0 = qhead if qhead is not None else ("</s>", "</s>")
q1 = qnext if qnext is not None else ("</s>", "</s>")
s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else "NONE"
s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else "NONE"
s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else "NONE"
s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else "NONE"
## like this: "s0w-s0t=%s|%s" % (s0w, s0t) ---> returns a list here!
return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))
# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys
def write(self, filename="-", weights=None):
if weights is None:
weights = self.weights
if filename == "-":
outfile = sys.stdout
filename = "STDOUT" # careful overriding
else:
outfile = open(filename, "wt")
self.print_templates(outfile)
mytime = Mytime()
nonzero = 0
print >> logs, "sorting %d features..." % len(weights),
for i, f in enumerate(sorted(weights), 1):
if i == 1: # sorting done
print >> logs, "done in %.2lf seconds." % mytime.period()
print >> logs, "writing features to %s..." % filename
v = weights[f]
if math.fabs(v) > 1e-3:
print >> outfile, "%s\t%.5lf" % (f, v)
nonzero += 1
if self.unk > 0: # print known words
print >> outfile, " " + " ".join(sorted(self.knowns)) # " " to mark
print >> logs, "%d nonzero feature instances written in %.2lf seconds." % \
(nonzero, mytime.period()) ## nonzero != i
@staticmethod
def trim(fv):
for f in fv:
if math.fabs(fv[f]) < 1e-3:
del fv[f]
return fv
@staticmethod
def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):
return ["q0t=%s" % (q0t),
"q0w-q0t=%s|%s" % (q0w, q0t),
"q0w=%s" % (q0w),
"s0t-q0t-q1t=%s|%s|%s" % (s0t, q0t, q1t),
"s0t-q0t=%s|%s" % (s0t, q0t),
"s0t-s1t=%s|%s" % (s0t, s1t),
"s0t-s1w-s1t=%s|%s|%s" % (s0t, s1w, s1t),
"s0t=%s" % (s0t),
"s0w-q0t-q1t=%s|%s|%s" % (s0w, q0t, q1t),
"s0w-s0t-s1t=%s|%s|%s" % (s0w, s0t, s1t),
"s0w-s0t-s1w-s1t=%s|%s|%s|%s" % (s0w, s0t, s1w, s1t),
"s0w-s0t-s1w=%s|%s|%s" % (s0w, s0t, s1w),
"s0w-s0t=%s|%s" % (s0w, s0t),
"s0w-s1w-s1t=%s|%s|%s" % (s0w, s1w, s1t),
"s0w-s1w=%s|%s" % (s0w, s1w),
"s0w=%s" % (s0w),
"s1t-s0t-q0t=%s|%s|%s" % (s1t, s0t, q0t),
"s1t-s0t-s0lct=%s|%s|%s" % (s1t, s0t, s0lct),
"s1t-s0t-s0rct=%s|%s|%s" % (s1t, s0t, s0rct),
"s1t-s0w-q0t=%s|%s|%s" % (s1t, s0w, q0t),
"s1t-s0w-s0lct=%s|%s|%s" % (s1t, s0w, s0lct),
"s1t-s1lct-s0t=%s|%s|%s" % (s1t, s1lct, s0t),
"s1t-s1lct-s0w=%s|%s|%s" % (s1t, s1lct, s0w),
"s1t-s1rct-s0t=%s|%s|%s" % (s1t, s1rct, s0t),
"s1t-s1rct-s0w=%s|%s|%s" % (s1t, s1rct, s0w),
"s1t=%s" % (s1t),
"s1w-s1t=%s|%s" % (s1w, s1t),
"s1w=%s" % (s1w),
"s2t-s1t-s0t=%s|%s|%s" % (s2t, s1t, s0t)]
def prune(self, filenames):
'''prune features from word/tag lines'''
print >> logs, "pruning features using %s..." % filenames,
fullset = set()
for filename in filenames.split():
for l in open(filename):
for w, t in map(lambda x:x.rsplit("/", 1), l.split()):
fullset.add(w)
fullset.add(t)
print >> logs, "collected %d uniq words & tags..." % (len(fullset)),
new = new_vector() # Vector()
for f in self.weights:
stuff = f.split("=", 1)[1].rsplit("=", 1)[0].split("|") ## b/w 1st and last "=", but caution
for s in stuff:
if s not in fullset:
break
else:
new[f] = self.weights[f]
print >> logs, "%d features survived (ratio: %.2f)" % (len(new), len(new) / len(self.weights))
self.weights = new
def sparsify(self, z=1):
'''duchi et al., 2008'''
if __name__ == "__main__":
flags.DEFINE_string("prune", None, "prune features w.r.t. FILE (word/tag format)")
try:
argv = FLAGS(sys.argv)
if FLAGS.weights is None:
raise flags.FlagsError("must specify weights by -w ...")
except flags.FlagsError, e:
print >> logs, 'Error: %s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
FLAGS.featstat = True
model = Model(FLAGS.weights) #.model, FLAGS.weights)
if FLAGS.prune:
model.prune(FLAGS.prune)
if FLAGS.outputweights:
model.write(FLAGS.outputweights)
| self.knowns = set()
self.unk = FLAGS.unk
self.unktag = FLAGS.unktag
self.unkdel = FLAGS.unkdel
assert not (self.unkdel and self.unktag), "UNKDEL and UNKTAG can't be both true"
if FLAGS.svector: # now it is known
global svector
try:
svector = __import__("svector")
print >> logs, "WARNING: using David's svector (Cython). Performance might suffer."
except:
print >> logs, "WARNING: failed to import svector. using Pythonic defaultdict instead (actually faster)."
FLAGS.svector = False # important
self.templates = {} # mapping from "s0t-q0t" to the eval expression
self.list_templates = [] # ordered list of template keys "s0t-q0t"
self.freq_templates = defaultdict(int)
self.weights = new_vector() #Vector()
self.read_weights(weightstr)
## self.featurenames = set(self.weights.iterkeys())
if FLAGS.featstat:
self.print_templates() | identifier_body |
model.py | #!/usr/bin/env python
from __future__ import division
import sys
import math
logs = sys.stderr
from collections import defaultdict
import time
from mytime import Mytime
import gflags as flags
FLAGS=flags.FLAGS
flags.DEFINE_string("weights", None, "weights file (feature instances and weights)", short_name="w")
flags.DEFINE_boolean("svector", False, "use David's svector (Cython) instead of Pythonic defaultdict")
flags.DEFINE_boolean("featstat", False, "print feature stats")
flags.DEFINE_string("outputweights", None, "write weights (in short-hand format); - for STDOUT", short_name="ow")
flags.DEFINE_boolean("autoeval", True, "use automatically generated eval module")
flags.DEFINE_integer("unk", 0, "treat words with count less than COUNT as UNKNOWN")
flags.DEFINE_boolean("debug_wordfreq", False, "print word freq info")
flags.DEFINE_boolean("unktag", False, "use POS tags for unknown words")
flags.DEFINE_boolean("unkdel", False, "remove features involving unks")
flags.DEFINE_boolean("s2", True, "use s2t features")
def new_vector():
return defaultdict(int) if not FLAGS.svector else svector.Vector() # do not use lambda
class Model(object):
'''templates and weights.'''
## __slots__ = "templates", "weights", "list_templates", "freq_templates"
names = ["SHIFT", "LEFT", "RIGHT"]
indent = " " * 4
eval_module = None # by default, use my handwritten static_eval()
def __init__(self, weightstr):
self.knowns = set()
self.unk = FLAGS.unk
self.unktag = FLAGS.unktag
self.unkdel = FLAGS.unkdel
assert not (self.unkdel and self.unktag), "UNKDEL and UNKTAG can't be both true"
if FLAGS.svector: # now it is known
global svector
try:
svector = __import__("svector")
print >> logs, "WARNING: using David's svector (Cython). Performance might suffer."
except:
print >> logs, "WARNING: failed to import svector. using Pythonic defaultdict instead (actually faster)."
FLAGS.svector = False # important
self.templates = {} # mapping from "s0t-q0t" to the eval expression
self.list_templates = [] # ordered list of template keys "s0t-q0t"
self.freq_templates = defaultdict(int)
self.weights = new_vector() #Vector()
self.read_weights(weightstr)
## self.featurenames = set(self.weights.iterkeys())
if FLAGS.featstat:
self.print_templates()
def count_knowns_from_train(self, trainfile, devfile):
'''used in training'''
print >> logs, "counting word freqs from %s, unktag=%s" % (trainfile, self.unktag)
stime = time.time()
words = defaultdict(int)
for i, line in enumerate(open(trainfile)):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
words[word] += 1
if FLAGS.debug_wordfreq:
devunk1 = set()
devunk0 = set()
for line in open(devfile):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
if words[word] <= self.unk and words[word] > 0:
devunk1.add(word)
if words[word] == 0:
devunk0.add(word)
print >> logs, "=1", len(devunk1), " ".join(sorted(devunk1))
print >> logs
print >> logs, "=0", len(devunk0), " ".join(sorted(devunk0))
## freqs = defaultdict(list)
## for word, freq in words.items():
## freqs[freq].append(word)
## for freq in sorted(freqs, reverse=True):
## print >> logs, freq, len(freqs[freq]), " ".join(sorted(freqs[freq]))
## print >> logs
self.knowns = set()
for word, freq in words.items():
if freq > self.unk:
self.knowns.add(word)
print >> logs, "%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds" % \
(i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)
## print >> logs, " ".join(sorted(self.knowns))
def add_template(self, s, freq=1):
## like this: "s0w-s0t=%s|%s" % (s0w, s0t)
symbols = s.split("-") # static part: s0w-s0t
if s not in self.templates:
tmp = '"%s=%s" %% (%s)' % (s, \
"|".join(["%s"] * len(symbols)), \
", ".join(symbols))
self.templates[s] = compile(tmp, "2", "eval")
self.list_templates.append((s, tmp)) # in order
self.freq_templates[s] += int(freq)
def print_autoevals(self):
tfilename = str(int(time.time()))
templatefile = open("/tmp/%s.py" % tfilename, "wt")
print >> templatefile, "#generated by model.py"
print >> templatefile, "import sys; print >> sys.stderr, 'importing succeeded!'"
print >> templatefile, "def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):"
print >> templatefile, "%sreturn [" % Model.indent
for s, e in self.list_templates:
print >> templatefile, "%s%s," % (Model.indent * 2, e)
print >> templatefile, "%s]" % (Model.indent * 2)
templatefile.close()
if FLAGS.autoeval:
sys.path.append('/tmp/')
print >> logs, "importing auto-generated file /tmp/%s.py" % tfilename
# to be used in newstate
Model.eval_module = __import__(tfilename)
else:
Model.eval_module = Model
def print_templates(self, f=logs):
print >> f, ">>> %d templates in total:" % len(self.templates)
print >> f, "\n".join(["%-20s\t%d" % (x, self.freq_templates[x]) \
for x, _ in self.list_templates])
print >> f, "---"
def read_templates(self, filename):
## try interpreting it as a filename, if failed, then as a string
try:
f = open(filename)
print >> logs, "reading templates from %s" % filename,
for x in f:
if x[:3] == "---":
break
if x[:3] == ">>>":
continue
try:
s, freq = x.split()
except:
s, freq = x, 1
self.add_template(s, freq)
except:
## from argv string rather than file
for x in filename.split():
self.add_template(x)
f = None
print >> logs, "%d feature templates read." % len(self.templates)
return f
def read_weights(self, filename, infertemplates=False):
'''instances are like "s0t-q0t=LRB-</s>=>LEFT 3.8234"'''
infile = self.read_templates(filename)
infertemplates = len(self.templates) <= 1
if infertemplates:
print >> logs, "will infer templates from weights..."
mytime = Mytime()
i = 0
if infile is not None:
print >> logs, "reading feature weights from %s\t" % filename,
for i, line in enumerate(infile, 1):
if i % 200000 == 0:
print >> logs, "%d lines read..." % i,
if line[0] == " ":
# TODO: separate known words line (last line)
self.knowns = set(line.split())
print >> logs, "\n%d known words read." % len(self.knowns)
self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x
break
feat, weight = line.split()
self.weights[feat] = float(weight)
if infertemplates:
self.add_template(feat.split("=", 1)[0], 1) ## one occurrence
print >> logs, "\n%d feature instances (%d lines) read in %.2lf seconds." % \
(len(self.weights), i, mytime.period())
self.print_autoevals()
def make_feats(self, state):
'''returns a *list* of feature templates for state.'''
fv = new_vector() #Vector()
top = state.top()
topnext = state.top(1)
top3rd = state.top(2)
qhead = state.qhead()
qnext = state.qhead(1)
## this part is manual; their combinations are automatic
s0 = top.head() if top is not None else ("<s>", "<s>") # N.B. (...)
s1 = topnext.head() if topnext is not None else ("<s>", "<s>")
s2 = top3rd.head() if top3rd is not None else ("<s>", "<s>")
q0 = qhead if qhead is not None else ("</s>", "</s>")
q1 = qnext if qnext is not None else ("</s>", "</s>")
s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else "NONE"
s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else "NONE"
s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else "NONE"
s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else "NONE"
## like this: "s0w-s0t=%s|%s" % (s0w, s0t) ---> returns a list here!
return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))
# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys
def write(self, filename="-", weights=None):
if weights is None:
weights = self.weights
if filename == "-":
outfile = sys.stdout
filename = "STDOUT" # careful overriding
else:
outfile = open(filename, "wt")
self.print_templates(outfile)
mytime = Mytime()
nonzero = 0
print >> logs, "sorting %d features..." % len(weights),
for i, f in enumerate(sorted(weights), 1):
if i == 1: # sorting done
print >> logs, "done in %.2lf seconds." % mytime.period()
print >> logs, "writing features to %s..." % filename
v = weights[f]
if math.fabs(v) > 1e-3:
print >> outfile, "%s\t%.5lf" % (f, v)
nonzero += 1
if self.unk > 0: # print known words
print >> outfile, " " + " ".join(sorted(self.knowns)) # " " to mark
print >> logs, "%d nonzero feature instances written in %.2lf seconds." % \
(nonzero, mytime.period()) ## nonzero != i
@staticmethod
def trim(fv):
for f in fv:
if math.fabs(fv[f]) < 1e-3:
|
return fv
@staticmethod
def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):
return ["q0t=%s" % (q0t),
"q0w-q0t=%s|%s" % (q0w, q0t),
"q0w=%s" % (q0w),
"s0t-q0t-q1t=%s|%s|%s" % (s0t, q0t, q1t),
"s0t-q0t=%s|%s" % (s0t, q0t),
"s0t-s1t=%s|%s" % (s0t, s1t),
"s0t-s1w-s1t=%s|%s|%s" % (s0t, s1w, s1t),
"s0t=%s" % (s0t),
"s0w-q0t-q1t=%s|%s|%s" % (s0w, q0t, q1t),
"s0w-s0t-s1t=%s|%s|%s" % (s0w, s0t, s1t),
"s0w-s0t-s1w-s1t=%s|%s|%s|%s" % (s0w, s0t, s1w, s1t),
"s0w-s0t-s1w=%s|%s|%s" % (s0w, s0t, s1w),
"s0w-s0t=%s|%s" % (s0w, s0t),
"s0w-s1w-s1t=%s|%s|%s" % (s0w, s1w, s1t),
"s0w-s1w=%s|%s" % (s0w, s1w),
"s0w=%s" % (s0w),
"s1t-s0t-q0t=%s|%s|%s" % (s1t, s0t, q0t),
"s1t-s0t-s0lct=%s|%s|%s" % (s1t, s0t, s0lct),
"s1t-s0t-s0rct=%s|%s|%s" % (s1t, s0t, s0rct),
"s1t-s0w-q0t=%s|%s|%s" % (s1t, s0w, q0t),
"s1t-s0w-s0lct=%s|%s|%s" % (s1t, s0w, s0lct),
"s1t-s1lct-s0t=%s|%s|%s" % (s1t, s1lct, s0t),
"s1t-s1lct-s0w=%s|%s|%s" % (s1t, s1lct, s0w),
"s1t-s1rct-s0t=%s|%s|%s" % (s1t, s1rct, s0t),
"s1t-s1rct-s0w=%s|%s|%s" % (s1t, s1rct, s0w),
"s1t=%s" % (s1t),
"s1w-s1t=%s|%s" % (s1w, s1t),
"s1w=%s" % (s1w),
"s2t-s1t-s0t=%s|%s|%s" % (s2t, s1t, s0t)]
def prune(self, filenames):
'''prune features from word/tag lines'''
print >> logs, "pruning features using %s..." % filenames,
fullset = set()
for filename in filenames.split():
for l in open(filename):
for w, t in map(lambda x:x.rsplit("/", 1), l.split()):
fullset.add(w)
fullset.add(t)
print >> logs, "collected %d uniq words & tags..." % (len(fullset)),
new = new_vector() # Vector()
for f in self.weights:
stuff = f.split("=", 1)[1].rsplit("=", 1)[0].split("|") ## b/w 1st and last "=", but caution
for s in stuff:
if s not in fullset:
break
else:
new[f] = self.weights[f]
print >> logs, "%d features survived (ratio: %.2f)" % (len(new), len(new) / len(self.weights))
self.weights = new
def sparsify(self, z=1):
'''duchi et al., 2008'''
if __name__ == "__main__":
flags.DEFINE_string("prune", None, "prune features w.r.t. FILE (word/tag format)")
try:
argv = FLAGS(sys.argv)
if FLAGS.weights is None:
raise flags.FlagsError("must specify weights by -w ...")
except flags.FlagsError, e:
print >> logs, 'Error: %s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
FLAGS.featstat = True
model = Model(FLAGS.weights) #.model, FLAGS.weights)
if FLAGS.prune:
model.prune(FLAGS.prune)
if FLAGS.outputweights:
model.write(FLAGS.outputweights)
| del fv[f] | conditional_block |
model.py | #!/usr/bin/env python
from __future__ import division
import sys
import math
logs = sys.stderr
from collections import defaultdict
import time
from mytime import Mytime
import gflags as flags
FLAGS=flags.FLAGS
flags.DEFINE_string("weights", None, "weights file (feature instances and weights)", short_name="w")
flags.DEFINE_boolean("svector", False, "use David's svector (Cython) instead of Pythonic defaultdict")
flags.DEFINE_boolean("featstat", False, "print feature stats")
flags.DEFINE_string("outputweights", None, "write weights (in short-hand format); - for STDOUT", short_name="ow")
flags.DEFINE_boolean("autoeval", True, "use automatically generated eval module")
flags.DEFINE_integer("unk", 0, "treat words with count less than COUNT as UNKNOWN")
flags.DEFINE_boolean("debug_wordfreq", False, "print word freq info")
flags.DEFINE_boolean("unktag", False, "use POS tags for unknown words")
flags.DEFINE_boolean("unkdel", False, "remove features involving unks")
flags.DEFINE_boolean("s2", True, "use s2t features")
def new_vector():
return defaultdict(int) if not FLAGS.svector else svector.Vector() # do not use lambda
class Model(object):
'''templates and weights.'''
## __slots__ = "templates", "weights", "list_templates", "freq_templates"
names = ["SHIFT", "LEFT", "RIGHT"]
indent = " " * 4
eval_module = None # by default, use my handwritten static_eval()
def __init__(self, weightstr):
self.knowns = set()
self.unk = FLAGS.unk
self.unktag = FLAGS.unktag
self.unkdel = FLAGS.unkdel
assert not (self.unkdel and self.unktag), "UNKDEL and UNKTAG can't be both true"
if FLAGS.svector: # now it is known
global svector
try:
svector = __import__("svector")
print >> logs, "WARNING: using David's svector (Cython). Performance might suffer."
except:
print >> logs, "WARNING: failed to import svector. using Pythonic defaultdict instead (actually faster)."
FLAGS.svector = False # important
self.templates = {} # mapping from "s0t-q0t" to the eval expression
self.list_templates = [] # ordered list of template keys "s0t-q0t"
self.freq_templates = defaultdict(int)
self.weights = new_vector() #Vector()
self.read_weights(weightstr)
## self.featurenames = set(self.weights.iterkeys())
if FLAGS.featstat:
self.print_templates()
def count_knowns_from_train(self, trainfile, devfile):
'''used in training'''
print >> logs, "counting word freqs from %s, unktag=%s" % (trainfile, self.unktag)
stime = time.time()
words = defaultdict(int)
for i, line in enumerate(open(trainfile)):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
words[word] += 1
if FLAGS.debug_wordfreq:
devunk1 = set()
devunk0 = set()
for line in open(devfile):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
if words[word] <= self.unk and words[word] > 0:
devunk1.add(word)
if words[word] == 0:
devunk0.add(word)
print >> logs, "=1", len(devunk1), " ".join(sorted(devunk1))
print >> logs |
## for freq in sorted(freqs, reverse=True):
## print >> logs, freq, len(freqs[freq]), " ".join(sorted(freqs[freq]))
## print >> logs
self.knowns = set()
for word, freq in words.items():
if freq > self.unk:
self.knowns.add(word)
print >> logs, "%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds" % \
(i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)
## print >> logs, " ".join(sorted(self.knowns))
def add_template(self, s, freq=1):
## like this: "s0w-s0t=%s|%s" % (s0w, s0t)
symbols = s.split("-") # static part: s0w-s0t
if s not in self.templates:
tmp = '"%s=%s" %% (%s)' % (s, \
"|".join(["%s"] * len(symbols)), \
", ".join(symbols))
self.templates[s] = compile(tmp, "2", "eval")
self.list_templates.append((s, tmp)) # in order
self.freq_templates[s] += int(freq)
def print_autoevals(self):
tfilename = str(int(time.time()))
templatefile = open("/tmp/%s.py" % tfilename, "wt")
print >> templatefile, "#generated by model.py"
print >> templatefile, "import sys; print >> sys.stderr, 'importing succeeded!'"
print >> templatefile, "def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):"
print >> templatefile, "%sreturn [" % Model.indent
for s, e in self.list_templates:
print >> templatefile, "%s%s," % (Model.indent * 2, e)
print >> templatefile, "%s]" % (Model.indent * 2)
templatefile.close()
if FLAGS.autoeval:
sys.path.append('/tmp/')
print >> logs, "importing auto-generated file /tmp/%s.py" % tfilename
# to be used in newstate
Model.eval_module = __import__(tfilename)
else:
Model.eval_module = Model
def print_templates(self, f=logs):
print >> f, ">>> %d templates in total:" % len(self.templates)
print >> f, "\n".join(["%-20s\t%d" % (x, self.freq_templates[x]) \
for x, _ in self.list_templates])
print >> f, "---"
def read_templates(self, filename):
## try interpreting it as a filename, if failed, then as a string
try:
f = open(filename)
print >> logs, "reading templates from %s" % filename,
for x in f:
if x[:3] == "---":
break
if x[:3] == ">>>":
continue
try:
s, freq = x.split()
except:
s, freq = x, 1
self.add_template(s, freq)
except:
## from argv string rather than file
for x in filename.split():
self.add_template(x)
f = None
print >> logs, "%d feature templates read." % len(self.templates)
return f
def read_weights(self, filename, infertemplates=False):
'''instances are like "s0t-q0t=LRB-</s>=>LEFT 3.8234"'''
infile = self.read_templates(filename)
infertemplates = len(self.templates) <= 1
if infertemplates:
print >> logs, "will infer templates from weights..."
mytime = Mytime()
i = 0
if infile is not None:
print >> logs, "reading feature weights from %s\t" % filename,
for i, line in enumerate(infile, 1):
if i % 200000 == 0:
print >> logs, "%d lines read..." % i,
if line[0] == " ":
# TODO: separate known words line (last line)
self.knowns = set(line.split())
print >> logs, "\n%d known words read." % len(self.knowns)
self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x
break
feat, weight = line.split()
self.weights[feat] = float(weight)
if infertemplates:
self.add_template(feat.split("=", 1)[0], 1) ## one occurrence
print >> logs, "\n%d feature instances (%d lines) read in %.2lf seconds." % \
(len(self.weights), i, mytime.period())
self.print_autoevals()
def make_feats(self, state):
'''returns a *list* of feature templates for state.'''
fv = new_vector() #Vector()
top = state.top()
topnext = state.top(1)
top3rd = state.top(2)
qhead = state.qhead()
qnext = state.qhead(1)
## this part is manual; their combinations are automatic
s0 = top.head() if top is not None else ("<s>", "<s>") # N.B. (...)
s1 = topnext.head() if topnext is not None else ("<s>", "<s>")
s2 = top3rd.head() if top3rd is not None else ("<s>", "<s>")
q0 = qhead if qhead is not None else ("</s>", "</s>")
q1 = qnext if qnext is not None else ("</s>", "</s>")
s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else "NONE"
s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else "NONE"
s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else "NONE"
s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else "NONE"
## like this: "s0w-s0t=%s|%s" % (s0w, s0t) ---> returns a list here!
return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))
# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys
def write(self, filename="-", weights=None):
if weights is None:
weights = self.weights
if filename == "-":
outfile = sys.stdout
filename = "STDOUT" # careful overriding
else:
outfile = open(filename, "wt")
self.print_templates(outfile)
mytime = Mytime()
nonzero = 0
print >> logs, "sorting %d features..." % len(weights),
for i, f in enumerate(sorted(weights), 1):
if i == 1: # sorting done
print >> logs, "done in %.2lf seconds." % mytime.period()
print >> logs, "writing features to %s..." % filename
v = weights[f]
if math.fabs(v) > 1e-3:
print >> outfile, "%s\t%.5lf" % (f, v)
nonzero += 1
if self.unk > 0: # print known words
print >> outfile, " " + " ".join(sorted(self.knowns)) # " " to mark
print >> logs, "%d nonzero feature instances written in %.2lf seconds." % \
(nonzero, mytime.period()) ## nonzero != i
@staticmethod
def trim(fv):
for f in fv:
if math.fabs(fv[f]) < 1e-3:
del fv[f]
return fv
@staticmethod
def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):
return ["q0t=%s" % (q0t),
"q0w-q0t=%s|%s" % (q0w, q0t),
"q0w=%s" % (q0w),
"s0t-q0t-q1t=%s|%s|%s" % (s0t, q0t, q1t),
"s0t-q0t=%s|%s" % (s0t, q0t),
"s0t-s1t=%s|%s" % (s0t, s1t),
"s0t-s1w-s1t=%s|%s|%s" % (s0t, s1w, s1t),
"s0t=%s" % (s0t),
"s0w-q0t-q1t=%s|%s|%s" % (s0w, q0t, q1t),
"s0w-s0t-s1t=%s|%s|%s" % (s0w, s0t, s1t),
"s0w-s0t-s1w-s1t=%s|%s|%s|%s" % (s0w, s0t, s1w, s1t),
"s0w-s0t-s1w=%s|%s|%s" % (s0w, s0t, s1w),
"s0w-s0t=%s|%s" % (s0w, s0t),
"s0w-s1w-s1t=%s|%s|%s" % (s0w, s1w, s1t),
"s0w-s1w=%s|%s" % (s0w, s1w),
"s0w=%s" % (s0w),
"s1t-s0t-q0t=%s|%s|%s" % (s1t, s0t, q0t),
"s1t-s0t-s0lct=%s|%s|%s" % (s1t, s0t, s0lct),
"s1t-s0t-s0rct=%s|%s|%s" % (s1t, s0t, s0rct),
"s1t-s0w-q0t=%s|%s|%s" % (s1t, s0w, q0t),
"s1t-s0w-s0lct=%s|%s|%s" % (s1t, s0w, s0lct),
"s1t-s1lct-s0t=%s|%s|%s" % (s1t, s1lct, s0t),
"s1t-s1lct-s0w=%s|%s|%s" % (s1t, s1lct, s0w),
"s1t-s1rct-s0t=%s|%s|%s" % (s1t, s1rct, s0t),
"s1t-s1rct-s0w=%s|%s|%s" % (s1t, s1rct, s0w),
"s1t=%s" % (s1t),
"s1w-s1t=%s|%s" % (s1w, s1t),
"s1w=%s" % (s1w),
"s2t-s1t-s0t=%s|%s|%s" % (s2t, s1t, s0t)]
def prune(self, filenames):
'''prune features from word/tag lines'''
print >> logs, "pruning features using %s..." % filenames,
fullset = set()
for filename in filenames.split():
for l in open(filename):
for w, t in map(lambda x:x.rsplit("/", 1), l.split()):
fullset.add(w)
fullset.add(t)
print >> logs, "collected %d uniq words & tags..." % (len(fullset)),
new = new_vector() # Vector()
for f in self.weights:
stuff = f.split("=", 1)[1].rsplit("=", 1)[0].split("|") ## b/w 1st and last "=", but caution
for s in stuff:
if s not in fullset:
break
else:
new[f] = self.weights[f]
print >> logs, "%d features survived (ratio: %.2f)" % (len(new), len(new) / len(self.weights))
self.weights = new
def sparsify(self, z=1):
'''duchi et al., 2008'''
if __name__ == "__main__":
flags.DEFINE_string("prune", None, "prune features w.r.t. FILE (word/tag format)")
try:
argv = FLAGS(sys.argv)
if FLAGS.weights is None:
raise flags.FlagsError("must specify weights by -w ...")
except flags.FlagsError, e:
print >> logs, 'Error: %s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
FLAGS.featstat = True
model = Model(FLAGS.weights) #.model, FLAGS.weights)
if FLAGS.prune:
model.prune(FLAGS.prune)
if FLAGS.outputweights:
model.write(FLAGS.outputweights) | print >> logs, "=0", len(devunk0), " ".join(sorted(devunk0))
## freqs = defaultdict(list)
## for word, freq in words.items():
## freqs[freq].append(word) | random_line_split |
model.py | #!/usr/bin/env python
from __future__ import division
import sys
import math
logs = sys.stderr
from collections import defaultdict
import time
from mytime import Mytime
import gflags as flags
FLAGS=flags.FLAGS
flags.DEFINE_string("weights", None, "weights file (feature instances and weights)", short_name="w")
flags.DEFINE_boolean("svector", False, "use David's svector (Cython) instead of Pythonic defaultdict")
flags.DEFINE_boolean("featstat", False, "print feature stats")
flags.DEFINE_string("outputweights", None, "write weights (in short-hand format); - for STDOUT", short_name="ow")
flags.DEFINE_boolean("autoeval", True, "use automatically generated eval module")
flags.DEFINE_integer("unk", 0, "treat words with count less than COUNT as UNKNOWN")
flags.DEFINE_boolean("debug_wordfreq", False, "print word freq info")
flags.DEFINE_boolean("unktag", False, "use POS tags for unknown words")
flags.DEFINE_boolean("unkdel", False, "remove features involving unks")
flags.DEFINE_boolean("s2", True, "use s2t features")
def new_vector():
return defaultdict(int) if not FLAGS.svector else svector.Vector() # do not use lambda
class Model(object):
'''templates and weights.'''
## __slots__ = "templates", "weights", "list_templates", "freq_templates"
names = ["SHIFT", "LEFT", "RIGHT"]
indent = " " * 4
eval_module = None # by default, use my handwritten static_eval()
def __init__(self, weightstr):
self.knowns = set()
self.unk = FLAGS.unk
self.unktag = FLAGS.unktag
self.unkdel = FLAGS.unkdel
assert not (self.unkdel and self.unktag), "UNKDEL and UNKTAG can't be both true"
if FLAGS.svector: # now it is known
global svector
try:
svector = __import__("svector")
print >> logs, "WARNING: using David's svector (Cython). Performance might suffer."
except:
print >> logs, "WARNING: failed to import svector. using Pythonic defaultdict instead (actually faster)."
FLAGS.svector = False # important
self.templates = {} # mapping from "s0t-q0t" to the eval expression
self.list_templates = [] # ordered list of template keys "s0t-q0t"
self.freq_templates = defaultdict(int)
self.weights = new_vector() #Vector()
self.read_weights(weightstr)
## self.featurenames = set(self.weights.iterkeys())
if FLAGS.featstat:
self.print_templates()
def | (self, trainfile, devfile):
'''used in training'''
print >> logs, "counting word freqs from %s, unktag=%s" % (trainfile, self.unktag)
stime = time.time()
words = defaultdict(int)
for i, line in enumerate(open(trainfile)):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
words[word] += 1
if FLAGS.debug_wordfreq:
devunk1 = set()
devunk0 = set()
for line in open(devfile):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
if words[word] <= self.unk and words[word] > 0:
devunk1.add(word)
if words[word] == 0:
devunk0.add(word)
print >> logs, "=1", len(devunk1), " ".join(sorted(devunk1))
print >> logs
print >> logs, "=0", len(devunk0), " ".join(sorted(devunk0))
## freqs = defaultdict(list)
## for word, freq in words.items():
## freqs[freq].append(word)
## for freq in sorted(freqs, reverse=True):
## print >> logs, freq, len(freqs[freq]), " ".join(sorted(freqs[freq]))
## print >> logs
self.knowns = set()
for word, freq in words.items():
if freq > self.unk:
self.knowns.add(word)
print >> logs, "%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds" % \
(i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)
## print >> logs, " ".join(sorted(self.knowns))
def add_template(self, s, freq=1):
## like this: "s0w-s0t=%s|%s" % (s0w, s0t)
symbols = s.split("-") # static part: s0w-s0t
if s not in self.templates:
tmp = '"%s=%s" %% (%s)' % (s, \
"|".join(["%s"] * len(symbols)), \
", ".join(symbols))
self.templates[s] = compile(tmp, "2", "eval")
self.list_templates.append((s, tmp)) # in order
self.freq_templates[s] += int(freq)
def print_autoevals(self):
tfilename = str(int(time.time()))
templatefile = open("/tmp/%s.py" % tfilename, "wt")
print >> templatefile, "#generated by model.py"
print >> templatefile, "import sys; print >> sys.stderr, 'importing succeeded!'"
print >> templatefile, "def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):"
print >> templatefile, "%sreturn [" % Model.indent
for s, e in self.list_templates:
print >> templatefile, "%s%s," % (Model.indent * 2, e)
print >> templatefile, "%s]" % (Model.indent * 2)
templatefile.close()
if FLAGS.autoeval:
sys.path.append('/tmp/')
print >> logs, "importing auto-generated file /tmp/%s.py" % tfilename
# to be used in newstate
Model.eval_module = __import__(tfilename)
else:
Model.eval_module = Model
def print_templates(self, f=logs):
print >> f, ">>> %d templates in total:" % len(self.templates)
print >> f, "\n".join(["%-20s\t%d" % (x, self.freq_templates[x]) \
for x, _ in self.list_templates])
print >> f, "---"
def read_templates(self, filename):
## try interpreting it as a filename, if failed, then as a string
try:
f = open(filename)
print >> logs, "reading templates from %s" % filename,
for x in f:
if x[:3] == "---":
break
if x[:3] == ">>>":
continue
try:
s, freq = x.split()
except:
s, freq = x, 1
self.add_template(s, freq)
except:
## from argv string rather than file
for x in filename.split():
self.add_template(x)
f = None
print >> logs, "%d feature templates read." % len(self.templates)
return f
def read_weights(self, filename, infertemplates=False):
'''instances are like "s0t-q0t=LRB-</s>=>LEFT 3.8234"'''
infile = self.read_templates(filename)
infertemplates = len(self.templates) <= 1
if infertemplates:
print >> logs, "will infer templates from weights..."
mytime = Mytime()
i = 0
if infile is not None:
print >> logs, "reading feature weights from %s\t" % filename,
for i, line in enumerate(infile, 1):
if i % 200000 == 0:
print >> logs, "%d lines read..." % i,
if line[0] == " ":
# TODO: separate known words line (last line)
self.knowns = set(line.split())
print >> logs, "\n%d known words read." % len(self.knowns)
self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x
break
feat, weight = line.split()
self.weights[feat] = float(weight)
if infertemplates:
self.add_template(feat.split("=", 1)[0], 1) ## one occurrence
print >> logs, "\n%d feature instances (%d lines) read in %.2lf seconds." % \
(len(self.weights), i, mytime.period())
self.print_autoevals()
def make_feats(self, state):
'''returns a *list* of feature templates for state.'''
fv = new_vector() #Vector()
top = state.top()
topnext = state.top(1)
top3rd = state.top(2)
qhead = state.qhead()
qnext = state.qhead(1)
## this part is manual; their combinations are automatic
s0 = top.head() if top is not None else ("<s>", "<s>") # N.B. (...)
s1 = topnext.head() if topnext is not None else ("<s>", "<s>")
s2 = top3rd.head() if top3rd is not None else ("<s>", "<s>")
q0 = qhead if qhead is not None else ("</s>", "</s>")
q1 = qnext if qnext is not None else ("</s>", "</s>")
s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else "NONE"
s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else "NONE"
s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else "NONE"
s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else "NONE"
## like this: "s0w-s0t=%s|%s" % (s0w, s0t) ---> returns a list here!
return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))
# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys
def write(self, filename="-", weights=None):
if weights is None:
weights = self.weights
if filename == "-":
outfile = sys.stdout
filename = "STDOUT" # careful overriding
else:
outfile = open(filename, "wt")
self.print_templates(outfile)
mytime = Mytime()
nonzero = 0
print >> logs, "sorting %d features..." % len(weights),
for i, f in enumerate(sorted(weights), 1):
if i == 1: # sorting done
print >> logs, "done in %.2lf seconds." % mytime.period()
print >> logs, "writing features to %s..." % filename
v = weights[f]
if math.fabs(v) > 1e-3:
print >> outfile, "%s\t%.5lf" % (f, v)
nonzero += 1
if self.unk > 0: # print known words
print >> outfile, " " + " ".join(sorted(self.knowns)) # " " to mark
print >> logs, "%d nonzero feature instances written in %.2lf seconds." % \
(nonzero, mytime.period()) ## nonzero != i
@staticmethod
def trim(fv):
for f in fv:
if math.fabs(fv[f]) < 1e-3:
del fv[f]
return fv
@staticmethod
def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):
return ["q0t=%s" % (q0t),
"q0w-q0t=%s|%s" % (q0w, q0t),
"q0w=%s" % (q0w),
"s0t-q0t-q1t=%s|%s|%s" % (s0t, q0t, q1t),
"s0t-q0t=%s|%s" % (s0t, q0t),
"s0t-s1t=%s|%s" % (s0t, s1t),
"s0t-s1w-s1t=%s|%s|%s" % (s0t, s1w, s1t),
"s0t=%s" % (s0t),
"s0w-q0t-q1t=%s|%s|%s" % (s0w, q0t, q1t),
"s0w-s0t-s1t=%s|%s|%s" % (s0w, s0t, s1t),
"s0w-s0t-s1w-s1t=%s|%s|%s|%s" % (s0w, s0t, s1w, s1t),
"s0w-s0t-s1w=%s|%s|%s" % (s0w, s0t, s1w),
"s0w-s0t=%s|%s" % (s0w, s0t),
"s0w-s1w-s1t=%s|%s|%s" % (s0w, s1w, s1t),
"s0w-s1w=%s|%s" % (s0w, s1w),
"s0w=%s" % (s0w),
"s1t-s0t-q0t=%s|%s|%s" % (s1t, s0t, q0t),
"s1t-s0t-s0lct=%s|%s|%s" % (s1t, s0t, s0lct),
"s1t-s0t-s0rct=%s|%s|%s" % (s1t, s0t, s0rct),
"s1t-s0w-q0t=%s|%s|%s" % (s1t, s0w, q0t),
"s1t-s0w-s0lct=%s|%s|%s" % (s1t, s0w, s0lct),
"s1t-s1lct-s0t=%s|%s|%s" % (s1t, s1lct, s0t),
"s1t-s1lct-s0w=%s|%s|%s" % (s1t, s1lct, s0w),
"s1t-s1rct-s0t=%s|%s|%s" % (s1t, s1rct, s0t),
"s1t-s1rct-s0w=%s|%s|%s" % (s1t, s1rct, s0w),
"s1t=%s" % (s1t),
"s1w-s1t=%s|%s" % (s1w, s1t),
"s1w=%s" % (s1w),
"s2t-s1t-s0t=%s|%s|%s" % (s2t, s1t, s0t)]
def prune(self, filenames):
'''prune features from word/tag lines'''
print >> logs, "pruning features using %s..." % filenames,
fullset = set()
for filename in filenames.split():
for l in open(filename):
for w, t in map(lambda x:x.rsplit("/", 1), l.split()):
fullset.add(w)
fullset.add(t)
print >> logs, "collected %d uniq words & tags..." % (len(fullset)),
new = new_vector() # Vector()
for f in self.weights:
stuff = f.split("=", 1)[1].rsplit("=", 1)[0].split("|") ## b/w 1st and last "=", but caution
for s in stuff:
if s not in fullset:
break
else:
new[f] = self.weights[f]
print >> logs, "%d features survived (ratio: %.2f)" % (len(new), len(new) / len(self.weights))
self.weights = new
def sparsify(self, z=1):
'''duchi et al., 2008'''
if __name__ == "__main__":
flags.DEFINE_string("prune", None, "prune features w.r.t. FILE (word/tag format)")
try:
argv = FLAGS(sys.argv)
if FLAGS.weights is None:
raise flags.FlagsError("must specify weights by -w ...")
except flags.FlagsError, e:
print >> logs, 'Error: %s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
FLAGS.featstat = True
model = Model(FLAGS.weights) #.model, FLAGS.weights)
if FLAGS.prune:
model.prune(FLAGS.prune)
if FLAGS.outputweights:
model.write(FLAGS.outputweights)
| count_knowns_from_train | identifier_name |
resolver.rs | use std::collections::HashMap;
use std::rc::Rc;
use crate::ast::{Expr, Identifier, Literal, Stmt, VisitorMut};
use crate::environment::Environment;
use crate::error::Reporter;
use crate::token::Position;
#[derive(Clone, PartialEq)]
pub enum FunctionKind {
None,
Function,
// TODO: add more kinds supposedly...
}
pub struct Resolver {
// track what things are currently in scope, for local block scopes
// (global scope is not tracked)
scopes: Vec<HashMap<String, bool>>,
// track if we are currently in a function, and if so what kind
current_fn: FunctionKind,
// for reporting errors found during this stage
err_reporter: Box<Reporter>,
// keep track of errors encountered
num_errors: u64,
}
impl Resolver {
pub fn new<R: Reporter + 'static>(err_reporter: R) -> Self {
Resolver {
scopes: Vec::new(),
// start out at the top level
current_fn: FunctionKind::None,
err_reporter: Box::new(err_reporter),
num_errors: 0,
}
}
pub fn resolve(&mut self, statements: &mut Vec<Stmt>) -> Result<(), String> {
let environment = Environment::new(None);
for s in statements {
// visit all the statements, and catch any errors
match self.visit_stmt(s, &environment) {
Ok(_) => (),
Err(_) => {
self.num_errors += 1;
}
}
}
if self.num_errors > 0 {
Err(format!("resolver encountered {} error(s)", self.num_errors))
} else {
Ok(())
}
}
// report an error
pub fn error(&mut self, pos: Position, msg: &str) {
self.err_reporter.report(msg, "here", &pos);
self.num_errors += 1;
}
// start a new scope
pub fn begin_scope(&mut self) |
// exit the current scope
pub fn end_scope(&mut self) {
self.scopes.pop();
}
// declare a variable in the current scope
pub fn declare(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// check if this has already been declared
if scope.contains_key(&ident.name.to_string()) {
// report the error, but don't return it
self.error(
ident.pos.clone(),
&format!("variable `{}` re-declared in local scope", ident.name),
);
} else {
// mark that the var exists, but is not yet initialized
scope.insert(ident.name.to_string(), false);
}
}
}
}
// define a variable in the current scope
pub fn define(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// mark that the var exists, and is now initialized
scope.insert(ident.name.to_string(), true);
}
}
}
// figure out where the var will resolve, and
// store that in the interpreter
pub fn resolve_local(&mut self, name: &str, resolved_dist: &mut Option<usize>) {
// start at the innermost scope and work outwards
for (dist, scope) in self.scopes.iter().rev().enumerate() {
if scope.contains_key(name) {
// NOTE:
// For the book this info is stored in a HashMap in the interpreter,
// like HashMap<Expr, u64>,
// which I tried, but then `Eq` and `Hash` have to be derived for all kinds
// of things, and `f64` doesn't implement `Eq`, and I don't want to manually
// implement it, not to mention `Hash` (which I didn't try).
//
// So, where should I store this info?
// From the book: "One obvious place is right in the syntax tree node itself."
// (the book does not take that approach, because "it would require mucking
// around with our syntax tree generator")
//
// I'm not using their generator anyway, so that's where I'm going to store
// this info - in the AST node itself.
*resolved_dist = Some(dist);
return;
}
}
// not found, assume it's global
}
pub fn resolve_function(
&mut self,
params: &Vec<Identifier>,
body: &mut Stmt,
env: &Rc<Environment>,
kind: FunctionKind,
) -> Result<(), String> {
// use the call stack to save the enclosing function kind,
// then set the current one
let enclosing_fn = self.current_fn.clone();
self.current_fn = kind;
// create a new scope for the function body
self.begin_scope();
// bind vars for each of the function parameters
for param in params {
self.declare(param);
self.define(param);
}
self.visit_stmt(body, env)?;
self.end_scope();
// back to whatever function may be enclosing this one
self.current_fn = enclosing_fn;
Ok(())
}
}
// mut because the resolver needs to modify Expr with resolved distance
impl VisitorMut<()> for Resolver {
type Error = String;
fn visit_stmt(&mut self, s: &mut Stmt, env: &Rc<Environment>) -> Result<(), String> {
match s {
Stmt::Block(statements) => {
// blocks create the local scopes for statements
self.begin_scope();
for stmt in statements {
// just have to resolve each statement in turn
self.visit_stmt(stmt, env)?;
}
self.end_scope();
}
Stmt::Expression(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Function(name, params, ref mut body) => {
// functions bind var names and create a local scope
// first, handle the binding of the function name
// (eagerly, so the function can recursively refer to itself)
self.declare(name);
self.define(name);
// then handle the function body
self.resolve_function(params, body, env, FunctionKind::Function)?;
}
Stmt::If(ref mut if_expr, ref mut then_stmt, ref mut opt_else_stmt) => {
// resolve the condition and both branches
self.visit_expr(if_expr, env)?;
self.visit_stmt(then_stmt, env)?;
if let Some(s) = opt_else_stmt {
self.visit_stmt(s, env)?;
}
}
Stmt::Print(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Return(ref mut expr) => {
// check that we are actually in a function
// TODO: this should probably use the position of the Stmt
// (BUT, there is not Position for Stmt, so have to implement that...)
if self.current_fn == FunctionKind::None {
self.error(expr.position().clone(), "cannot return from top-level code");
}
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Var(name, ref mut expr) => {
// this adds a new entry to the innermost scope
// variable binding is split into 2 steps - declaring and defining
self.declare(name);
self.visit_expr(expr, env)?;
self.define(name);
}
Stmt::While(ref mut condition_expr, ref mut body) => {
// resolve the condition and body
self.visit_expr(condition_expr, env)?;
self.visit_stmt(body, env)?;
}
}
Ok(())
}
fn visit_expr(&mut self, e: &mut Expr, env: &Rc<Environment>) -> Result<(), String> {
match e {
Expr::Assign(_pos, var_name, ref mut expr, ref mut resolved_vars) => {
// resolve the expr first in case it also contains other vars
self.visit_expr(expr, env)?;
// then resolve the var being assigned to
self.resolve_local(var_name, resolved_vars);
}
Expr::Binary(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Call(_pos, ref mut callee_expr, args) => {
// resolve the thing being called
self.visit_expr(callee_expr, env)?;
// then walk the arg list and resolve those
for arg in args {
self.visit_expr(arg, env)?;
}
}
Expr::Grouping(_pos, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Expr::Literal(_pos, _lit) => {
// nothing to do - literals don't mention vars, and don't have subexpressions
}
Expr::Logical(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve body operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Unary(_pos, _op, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the operand
}
Expr::Variable(pos, name, ref mut resolved_vars) => {
// have to check the scope maps to resolve var expressions
match self.scopes.last() {
None => (),
Some(scope) => {
// check if the var is referring to itself in its initializer
if scope.get(name) == Some(&false) {
self.error(pos.clone(), "cannot read local var in its initializer");
}
}
}
// actually resolve the var
self.resolve_local(name, resolved_vars);
}
}
Ok(())
}
fn visit_literal(&self, _l: &Literal, _env: &Rc<Environment>) -> Result<(), String> {
// nothing to do for these - not going to actually call the visit method above
Ok(())
}
}
| {
self.scopes.push(HashMap::new());
} | identifier_body |
resolver.rs | use std::collections::HashMap;
use std::rc::Rc;
use crate::ast::{Expr, Identifier, Literal, Stmt, VisitorMut};
use crate::environment::Environment;
use crate::error::Reporter;
use crate::token::Position;
#[derive(Clone, PartialEq)]
pub enum FunctionKind {
None,
Function,
// TODO: add more kinds supposedly...
}
pub struct Resolver {
// track what things are currently in scope, for local block scopes
// (global scope is not tracked)
scopes: Vec<HashMap<String, bool>>,
// track if we are currently in a function, and if so what kind
current_fn: FunctionKind,
// for reporting errors found during this stage
err_reporter: Box<Reporter>,
// keep track of errors encountered
num_errors: u64,
}
impl Resolver {
pub fn new<R: Reporter + 'static>(err_reporter: R) -> Self {
Resolver {
scopes: Vec::new(),
// start out at the top level
current_fn: FunctionKind::None,
err_reporter: Box::new(err_reporter),
num_errors: 0,
}
}
pub fn resolve(&mut self, statements: &mut Vec<Stmt>) -> Result<(), String> {
let environment = Environment::new(None);
for s in statements {
// visit all the statements, and catch any errors
match self.visit_stmt(s, &environment) {
Ok(_) => (),
Err(_) => {
self.num_errors += 1;
}
}
}
if self.num_errors > 0 {
Err(format!("resolver encountered {} error(s)", self.num_errors))
} else {
Ok(())
}
}
// report an error
pub fn error(&mut self, pos: Position, msg: &str) {
self.err_reporter.report(msg, "here", &pos);
self.num_errors += 1;
}
// start a new scope
pub fn begin_scope(&mut self) {
self.scopes.push(HashMap::new());
}
// exit the current scope
pub fn end_scope(&mut self) {
self.scopes.pop();
}
// declare a variable in the current scope
pub fn | (&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// check if this has already been declared
if scope.contains_key(&ident.name.to_string()) {
// report the error, but don't return it
self.error(
ident.pos.clone(),
&format!("variable `{}` re-declared in local scope", ident.name),
);
} else {
// mark that the var exists, but is not yet initialized
scope.insert(ident.name.to_string(), false);
}
}
}
}
// define a variable in the current scope
pub fn define(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// mark that the var exists, and is now initialized
scope.insert(ident.name.to_string(), true);
}
}
}
// figure out where the var will resolve, and
// store that in the interpreter
pub fn resolve_local(&mut self, name: &str, resolved_dist: &mut Option<usize>) {
// start at the innermost scope and work outwards
for (dist, scope) in self.scopes.iter().rev().enumerate() {
if scope.contains_key(name) {
// NOTE:
// For the book this info is stored in a HashMap in the interpreter,
// like HashMap<Expr, u64>,
// which I tried, but then `Eq` and `Hash` have to be derived for all kinds
// of things, and `f64` doesn't implement `Eq`, and I don't want to manually
// implement it, not to mention `Hash` (which I didn't try).
//
// So, where should I store this info?
// From the book: "One obvious place is right in the syntax tree node itself."
// (the book does not take that approach, because "it would require mucking
// around with our syntax tree generator")
//
// I'm not using their generator anyway, so that's where I'm going to store
// this info - in the AST node itself.
*resolved_dist = Some(dist);
return;
}
}
// not found, assume it's global
}
pub fn resolve_function(
&mut self,
params: &Vec<Identifier>,
body: &mut Stmt,
env: &Rc<Environment>,
kind: FunctionKind,
) -> Result<(), String> {
// use the call stack to save the enclosing function kind,
// then set the current one
let enclosing_fn = self.current_fn.clone();
self.current_fn = kind;
// create a new scope for the function body
self.begin_scope();
// bind vars for each of the function parameters
for param in params {
self.declare(param);
self.define(param);
}
self.visit_stmt(body, env)?;
self.end_scope();
// back to whatever function may be enclosing this one
self.current_fn = enclosing_fn;
Ok(())
}
}
// mut because the resolver needs to modify Expr with resolved distance
impl VisitorMut<()> for Resolver {
type Error = String;
fn visit_stmt(&mut self, s: &mut Stmt, env: &Rc<Environment>) -> Result<(), String> {
match s {
Stmt::Block(statements) => {
// blocks create the local scopes for statements
self.begin_scope();
for stmt in statements {
// just have to resolve each statement in turn
self.visit_stmt(stmt, env)?;
}
self.end_scope();
}
Stmt::Expression(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Function(name, params, ref mut body) => {
// functions bind var names and create a local scope
// first, handle the binding of the function name
// (eagerly, so the function can recursively refer to itself)
self.declare(name);
self.define(name);
// then handle the function body
self.resolve_function(params, body, env, FunctionKind::Function)?;
}
Stmt::If(ref mut if_expr, ref mut then_stmt, ref mut opt_else_stmt) => {
// resolve the condition and both branches
self.visit_expr(if_expr, env)?;
self.visit_stmt(then_stmt, env)?;
if let Some(s) = opt_else_stmt {
self.visit_stmt(s, env)?;
}
}
Stmt::Print(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Return(ref mut expr) => {
// check that we are actually in a function
// TODO: this should probably use the position of the Stmt
// (BUT, there is not Position for Stmt, so have to implement that...)
if self.current_fn == FunctionKind::None {
self.error(expr.position().clone(), "cannot return from top-level code");
}
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Var(name, ref mut expr) => {
// this adds a new entry to the innermost scope
// variable binding is split into 2 steps - declaring and defining
self.declare(name);
self.visit_expr(expr, env)?;
self.define(name);
}
Stmt::While(ref mut condition_expr, ref mut body) => {
// resolve the condition and body
self.visit_expr(condition_expr, env)?;
self.visit_stmt(body, env)?;
}
}
Ok(())
}
fn visit_expr(&mut self, e: &mut Expr, env: &Rc<Environment>) -> Result<(), String> {
match e {
Expr::Assign(_pos, var_name, ref mut expr, ref mut resolved_vars) => {
// resolve the expr first in case it also contains other vars
self.visit_expr(expr, env)?;
// then resolve the var being assigned to
self.resolve_local(var_name, resolved_vars);
}
Expr::Binary(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Call(_pos, ref mut callee_expr, args) => {
// resolve the thing being called
self.visit_expr(callee_expr, env)?;
// then walk the arg list and resolve those
for arg in args {
self.visit_expr(arg, env)?;
}
}
Expr::Grouping(_pos, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Expr::Literal(_pos, _lit) => {
// nothing to do - literals don't mention vars, and don't have subexpressions
}
Expr::Logical(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve body operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Unary(_pos, _op, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the operand
}
Expr::Variable(pos, name, ref mut resolved_vars) => {
// have to check the scope maps to resolve var expressions
match self.scopes.last() {
None => (),
Some(scope) => {
// check if the var is referring to itself in its initializer
if scope.get(name) == Some(&false) {
self.error(pos.clone(), "cannot read local var in its initializer");
}
}
}
// actually resolve the var
self.resolve_local(name, resolved_vars);
}
}
Ok(())
}
fn visit_literal(&self, _l: &Literal, _env: &Rc<Environment>) -> Result<(), String> {
// nothing to do for these - not going to actually call the visit method above
Ok(())
}
}
| declare | identifier_name |
resolver.rs | use std::collections::HashMap;
use std::rc::Rc;
use crate::ast::{Expr, Identifier, Literal, Stmt, VisitorMut};
use crate::environment::Environment;
use crate::error::Reporter;
use crate::token::Position;
#[derive(Clone, PartialEq)]
pub enum FunctionKind {
None,
Function,
// TODO: add more kinds supposedly...
}
pub struct Resolver {
// track what things are currently in scope, for local block scopes
// (global scope is not tracked)
scopes: Vec<HashMap<String, bool>>,
// track if we are currently in a function, and if so what kind
current_fn: FunctionKind,
// for reporting errors found during this stage
err_reporter: Box<Reporter>,
// keep track of errors encountered
num_errors: u64,
}
impl Resolver {
pub fn new<R: Reporter + 'static>(err_reporter: R) -> Self {
Resolver {
scopes: Vec::new(),
// start out at the top level
current_fn: FunctionKind::None,
err_reporter: Box::new(err_reporter),
num_errors: 0,
}
}
pub fn resolve(&mut self, statements: &mut Vec<Stmt>) -> Result<(), String> {
let environment = Environment::new(None);
for s in statements {
// visit all the statements, and catch any errors
match self.visit_stmt(s, &environment) {
Ok(_) => (),
Err(_) => {
self.num_errors += 1;
}
}
}
if self.num_errors > 0 {
Err(format!("resolver encountered {} error(s)", self.num_errors))
} else {
Ok(())
}
}
// report an error
pub fn error(&mut self, pos: Position, msg: &str) {
self.err_reporter.report(msg, "here", &pos);
self.num_errors += 1;
}
// start a new scope
pub fn begin_scope(&mut self) {
self.scopes.push(HashMap::new());
}
// exit the current scope
pub fn end_scope(&mut self) {
self.scopes.pop();
}
// declare a variable in the current scope
pub fn declare(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// check if this has already been declared
if scope.contains_key(&ident.name.to_string()) {
// report the error, but don't return it
self.error(
ident.pos.clone(),
&format!("variable `{}` re-declared in local scope", ident.name),
);
} else {
// mark that the var exists, but is not yet initialized
scope.insert(ident.name.to_string(), false);
}
}
}
}
// define a variable in the current scope
pub fn define(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// mark that the var exists, and is now initialized
scope.insert(ident.name.to_string(), true);
}
}
}
// figure out where the var will resolve, and
// store that in the interpreter
pub fn resolve_local(&mut self, name: &str, resolved_dist: &mut Option<usize>) {
// start at the innermost scope and work outwards
for (dist, scope) in self.scopes.iter().rev().enumerate() {
if scope.contains_key(name) {
// NOTE:
// For the book this info is stored in a HashMap in the interpreter,
// like HashMap<Expr, u64>,
// which I tried, but then `Eq` and `Hash` have to be derived for all kinds
// of things, and `f64` doesn't implement `Eq`, and I don't want to manually
// implement it, not to mention `Hash` (which I didn't try).
//
// So, where should I store this info?
// From the book: "One obvious place is right in the syntax tree node itself."
// (the book does not take that approach, because "it would require mucking
// around with our syntax tree generator")
//
// I'm not using their generator anyway, so that's where I'm going to store
// this info - in the AST node itself.
*resolved_dist = Some(dist);
return;
}
}
// not found, assume it's global
}
pub fn resolve_function(
&mut self,
params: &Vec<Identifier>,
body: &mut Stmt,
env: &Rc<Environment>,
kind: FunctionKind,
) -> Result<(), String> {
// use the call stack to save the enclosing function kind,
// then set the current one
let enclosing_fn = self.current_fn.clone();
self.current_fn = kind;
// create a new scope for the function body
self.begin_scope();
// bind vars for each of the function parameters
for param in params {
self.declare(param);
self.define(param);
}
self.visit_stmt(body, env)?;
self.end_scope();
// back to whatever function may be enclosing this one
self.current_fn = enclosing_fn;
Ok(())
}
}
// mut because the resolver needs to modify Expr with resolved distance
impl VisitorMut<()> for Resolver {
type Error = String;
fn visit_stmt(&mut self, s: &mut Stmt, env: &Rc<Environment>) -> Result<(), String> {
match s {
Stmt::Block(statements) => {
// blocks create the local scopes for statements
self.begin_scope();
for stmt in statements {
// just have to resolve each statement in turn
self.visit_stmt(stmt, env)?;
}
self.end_scope();
}
Stmt::Expression(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Function(name, params, ref mut body) => {
// functions bind var names and create a local scope
// first, handle the binding of the function name
// (eagerly, so the function can recursively refer to itself)
self.declare(name);
self.define(name);
// then handle the function body
self.resolve_function(params, body, env, FunctionKind::Function)?;
}
Stmt::If(ref mut if_expr, ref mut then_stmt, ref mut opt_else_stmt) => {
// resolve the condition and both branches
self.visit_expr(if_expr, env)?;
self.visit_stmt(then_stmt, env)?;
if let Some(s) = opt_else_stmt {
self.visit_stmt(s, env)?;
}
}
Stmt::Print(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Return(ref mut expr) => {
// check that we are actually in a function
// TODO: this should probably use the position of the Stmt
// (BUT, there is not Position for Stmt, so have to implement that...)
if self.current_fn == FunctionKind::None {
self.error(expr.position().clone(), "cannot return from top-level code");
}
self.visit_expr(expr, env)?; // resolve the parts
} | // variable binding is split into 2 steps - declaring and defining
self.declare(name);
self.visit_expr(expr, env)?;
self.define(name);
}
Stmt::While(ref mut condition_expr, ref mut body) => {
// resolve the condition and body
self.visit_expr(condition_expr, env)?;
self.visit_stmt(body, env)?;
}
}
Ok(())
}
fn visit_expr(&mut self, e: &mut Expr, env: &Rc<Environment>) -> Result<(), String> {
match e {
Expr::Assign(_pos, var_name, ref mut expr, ref mut resolved_vars) => {
// resolve the expr first in case it also contains other vars
self.visit_expr(expr, env)?;
// then resolve the var being assigned to
self.resolve_local(var_name, resolved_vars);
}
Expr::Binary(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Call(_pos, ref mut callee_expr, args) => {
// resolve the thing being called
self.visit_expr(callee_expr, env)?;
// then walk the arg list and resolve those
for arg in args {
self.visit_expr(arg, env)?;
}
}
Expr::Grouping(_pos, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Expr::Literal(_pos, _lit) => {
// nothing to do - literals don't mention vars, and don't have subexpressions
}
Expr::Logical(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve body operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Unary(_pos, _op, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the operand
}
Expr::Variable(pos, name, ref mut resolved_vars) => {
// have to check the scope maps to resolve var expressions
match self.scopes.last() {
None => (),
Some(scope) => {
// check if the var is referring to itself in its initializer
if scope.get(name) == Some(&false) {
self.error(pos.clone(), "cannot read local var in its initializer");
}
}
}
// actually resolve the var
self.resolve_local(name, resolved_vars);
}
}
Ok(())
}
fn visit_literal(&self, _l: &Literal, _env: &Rc<Environment>) -> Result<(), String> {
// nothing to do for these - not going to actually call the visit method above
Ok(())
}
} | Stmt::Var(name, ref mut expr) => {
// this adds a new entry to the innermost scope | random_line_split |
list-view.js | /**
* @created 2014-3-12 19:27:29
*/
define(['App',
'tpl!app/oms/settle/total-account-detail/list/templates/table-ct.tpl',
'tpl!app/oms/settle/total-account-detail/list/templates/total-account-detail.tpl',
'i18n!app/oms/common/nls/settle',
'jquery.jqGrid',
'jquery.validate',
'bootstrap-datepicker'
], function(App, tableCtTpl, totalAcctDetailTpl, settleLang) {
var STLMTYPE_MAP = {
"0" : settleLang._("total-account-detail.stlmType.0"),
"1" : settleLang._("total-account-detail.stlmType.1"),
"2" : settleLang._("total-account-detail.stlmType.2")
};
var STAT_MAP = {
"0" : "审核通过",
"1" : "审核拒绝",
"2" : "初始化申请"
};
App.module('SettleApp.TotalAccountDetail.List.View', function(View, App, Backbone, Marionette, $, _) {
View.TotalAccountDetails = Marionette.ItemView.extend({
tabId: 'menu.total.account.detail',
template: tableCtTpl,
events: {
},
onRender: function() {
var me = this;
setTimeout(function() {
me.renderGrid();
},1);
},
getRequestDateSelect: function(gird) {
var data = {};
data['id'] = Opf.Grid.getSelRowId(gird);
var $form = $('form.form-total-account-detall').find(':input');
$form.each(function() {
data[$(this).attr('name')] = $(this).val();
});
return {
url: 'api/settle/total-account-details/' + data['id'] + '/update',
data: data,
type: "PUT",
contentType: "application/json",
dataType: "json",
needData: true
};
},
ajaxRequest: function(options, dialog, grid) {
$.ajax({
type: options.type,
contentType: options.contentType,
dataType: options.dataType,
url: options.url,
data: options.needData ? JSON.stringify(options.data) : "",
success: function(resp) {
console.log('审核结果:' + (resp.success || '') + (resp.msg || ''));
$(dialog).dialog("destroy");
$(grid).trigger("reloadGrid", [{current:true}]);
if(resp.success) {
Opf.Toast.success('操作成功');
}
},
error: function(resp) {
console.error(resp.msg || resp.success || resp);
$(dialog).dialog("destroy");
}
});
},
ajaxGetAccount: function(form, value, select){
$.ajax({
type: 'GET',
contentType: 'application/json',
dataType: 'json',
url: url._('stlm.account') + '/' + value + '/accountInfo',
success: function(resp) {
var $form = $(form),
$select = $(select);
$select.empty();
for(var i=0; i<resp.length; i++) {
var appendedData = '<option value="' + resp[i].key + '">' + resp[i].value + '</option>';
$select.append(appendedData);
}
},
error: function(resp){
console.error(resp);
}
});
},
attachValidation: function() {
return {
setupValidation: Opf.Validate.setup,
addValidateRules: function(form){
Opf.Validate.addRules(form, {
rules:{
stlmType:{
required: true
},
outAcctId:{
required: true
},
txAmt:{
required: true,
float: true
},
oprMsg:{
required: true,
maxlength: 300
}
}
});
}
};
},
onClickButton: function(roleGird) {
var me = this;
var tpl = totalAcctDetailTpl();
var $dialog = $(tpl).dialog({
autoOpen: true,
height: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.height'), //300,
width: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.width'), //350,
modal: true,
buttons: [{
html: "<i class='icon-ok'></i> 提交",
"class" : "btn btn-xs btn-primary",
click: function(e) {
| }
if(valid){
$($(e.target).closest('button')).addClass('disabled').find('span').html("<i class='icon-ok'></i> 正在提交...");
me.ajaxRequest(me.getRequestDateSelect(roleGird), this, roleGird);
}
}
}, {
html: "<i class='icon-remove'></i> 取消",
"class" : "btn btn-xs",
click: function() {
$( this ).dialog( "destroy" );
}
}],
create: function() {
Opf.Validate.addRules($('form.form-total-account-detall'), {
rules: {
oprMsg2: {
required: true,
maxlength: 300
}
}
});
$(this).prev('.ui-widget-header').find('.ui-dialog-title').addClass('settle-styles-paddingL-15');
},
close: function() {
$( this ).dialog( "destroy" );
}
});
},
renderGrid: function() {
var me = this;
var validation = this.attachValidation();
var roleGird = App.Factory.createJqGrid({
rsId:'totalAccountDetail',
caption: settleLang._('totalAccountDetail.txt'),
jsonReader: {
},
actionsCol: {
// width: Opf.Config._('ui', 'totalAccountDetail.grid.form.actionsCol.width'), // 130,
edit : false,
del: false,
extraButtons: [
{name: 'check', title:'审核', icon: 'icon-opf-verify icon-opf-verify-color', click: function() {
me.onClickButton(roleGird);
}}
],
canButtonRender: function(name, opts, rowData) {
// return true;
// 初始申请的情况下才可以显示审核按钮
if(name === 'check' && rowData.stat !== '2') {
return false;
}
}
},
nav: {
formSize: {
width: Opf.Config._('ui', 'totalAccountDetail.grid.form.width'),
height: Opf.Config._('ui', 'totalAccountDetail.grid.form.height')
},
add : {
beforeShowForm: function(form) {
validation.addValidateRules(form);
var $select = $(form).find('select[name="stlmType"]');
$select.on('change', function(){
var $acctSelect = $('#tr_outAcctId').find('select');
var $caption = $('#tr_outAcctId').find('.CaptionTD');
var value = $(this).val();
if (value === '0') {
$acctSelect.attr('name', 'outAcctId');
$caption.empty().append('付款账户');
me.ajaxGetAccount(form, value, $acctSelect);
} else if (value === '1') {
$acctSelect.attr('name', 'outAcctId');
$caption.empty().append('付款账户');
me.ajaxGetAccount(form, value, $acctSelect);
} else if (value === '2') {
$acctSelect.attr('name', 'inAcctId');
$caption.empty().append('收款账户');
me.ajaxGetAccount(form, value, $acctSelect);
}
});
$select.trigger('change');
},
beforeSubmit: validation.setupValidation
},
edit: {
beforeShowForm: function(form) {
validation.addValidateRules(form);
},
beforeSubmit: validation.setupValidation
},
view: {
width: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.width'),
height: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.height')
}
},
gid: 'total-account-details-grid',//innerly get corresponding ct '#total-account-details-grid-table' '#total-account-details-grid-pager'
url: url._('total.account.detail'),
colNames: {
id : settleLang._('total.account.detail.id'), //ID
stlmDate : settleLang._('total.account.detail.stlm.date'), //账务日期
stlmType : settleLang._('total.account.detail.stlm.type'), //账务维护类型
txAmt : settleLang._('total.account.detail.tx.amt'), //交易金额
realetiveAccount : settleLang._('total.account.detail.realetive.account'),
oprName : settleLang._('total.account.detail.opr.name'),
oprMsg : settleLang._('total.account.detail.opr.msg'), //操作描述
//add database message because the database had changed in 20140321
stat : settleLang._('total.account.detail.stat'), //状态
//add end
oprMsg2 : settleLang._('total.account.detail.opr.msg2'), //复审描述
oprName2 : settleLang._('total.account.detail.opr.name2'),
//add rows
outAcctNo : settleLang._('total.account.detail.out.acct.no'),
inAcctNo : settleLang._('total.account.detail.in.acct.no'),
//add end
//
outAcctId : settleLang._('total.account.detail.out.acct.id'), //付款账户Id
inAcctId : settleLang._('total.account.detail.in.acct.id'), //收款账户Id
oprId : settleLang._('total.account.detail.opr.id'), //操作员
oprId2 : settleLang._('total.account.detail.opr.id2'), //复审员
recOprTime : settleLang._('total.account.detail.rec.opr.time'), //操作时间
recOprTime2 : settleLang._('total.account.detail.rec.opr.time2') //复审时间
},
responsiveOptions: {
hidden: {
ss: ['realetiveAccount', 'oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
xs: ['oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
sm: ['stat', 'oprMsg2', 'oprName2'],
md: ['oprMsg2', 'oprName2'],
ld: []
}
},
colModel: [
{ name: 'id', index: 'id', editable: false, hidden: true }, //ID
{ name: 'stlmDate', index: 'stlmDate', width: 100, search: true, editable: false,
searchoptions: {
dataInit : function (elem) {
$(elem).datepicker( {autoclose: true, format: "yyyymmdd"} );
},
sopt: ['eq', 'ne', 'lt', 'le', 'gt', 'ge']
}
}, //账务日期
{name: 'stlmType', index: 'stlmType', width: 90, search:true, editable: true, formatter: stlmTypeFormatter,
stype: 'select',
searchoptions: {
value: STLMTYPE_MAP,
sopt: ['eq','ne']
},
edittype:'select',
editoptions: {
value: STLMTYPE_MAP
}
}, //账务维护类型
{name: 'txAmt', index: 'txAmt', search:true, editable: true, formatter: Opf.currencyFormatter,
_searchType:'num'
}, //交易金额
{name: 'realetiveAccount', index: 'realetiveAccount', width:250, search:false,editable:false, formatter : function(val, options, obj){
var account = obj.outAcctNo || obj.inAcctNo || '';
return account;
}},
{name: 'oprName', index: 'oprName', search:true, width: 90, editable:false,
_searchType:'string'
},
{name: 'oprMsg', index: 'oprMsg', search:false,editable: true, edittype: 'textarea'}, //操作描述
//add database message because the database had changed in 20140321
{name: 'stat', index: 'stat', search:true, editable:false, formatter: statFormatter,
stype: 'select',
searchoptions: {
value: STAT_MAP,
sopt: ['eq','ne']
}
},//状态
//add end
{name: 'oprMsg2', index: 'oprMsg2', search:false,editable: false}, //复审描述
{name: 'oprId2', index: 'oprId2', search:false,editable: false, hidden: true, viewable: false}, //复审员
//
//add rows
{name: 'outAcctNo', index: 'outAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
searchoptions: {
sopt: ['eq']
}*/
},
{name: 'inAcctNo', index: 'inAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
searchoptions: {
sopt: ['eq']
}*/
},
{name: 'oprName2', index: 'oprName2', search:true, width: 90, editable:false,
_searchType:'string'
},
//add end
//
{name: 'outAcctId', index: 'outAcctId', search:false,editable: true, hidden: true, viewable: false, edittype: 'select'}, //付款账户Id
{name: 'inAcctId', index: 'inAcctId', search:false,editable: false, hidden: true, viewable: false}, //收款账户Id
{name: 'oprId', index: 'oprId', search:false,editable: false, hidden: true, viewable: false}, //操作员
{name: 'recOprTime', index: 'recOprTime', search: false, editable: false, hidden:true,
editoptions: {
dataInit : function (elem) {
$(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
.on("changeDate changeMonth changeYear", function(oDate) {
$(oDate.target).valid();
});
}
}
}, //操作时间
{name: 'recOprTime2', index: 'recOprTime2', search: false, editable: false, hidden:true,
editoptions: {
dataInit : function (elem) {
$(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
.on("changeDate changeMonth changeYear", function(oDate) {
$(oDate.target).valid();
});
}
}
} //复审时间
],
loadComplete: function() {}
});
}
});
});
function stlmTypeFormatter (val) {
return STLMTYPE_MAP[val];
}
function statFormatter (val) {
return STAT_MAP[val];
}
return App.SettleApp.TotalAccountDetail.List.View;
}); | var $form = $('form.form-total-account-detall');
var validator = $form.validate();
var valid = true;
if(validator && !validator.form()){
valid = false; | random_line_split |
list-view.js | /**
* @created 2014-3-12 19:27:29
*/
define(['App',
'tpl!app/oms/settle/total-account-detail/list/templates/table-ct.tpl',
'tpl!app/oms/settle/total-account-detail/list/templates/total-account-detail.tpl',
'i18n!app/oms/common/nls/settle',
'jquery.jqGrid',
'jquery.validate',
'bootstrap-datepicker'
], function(App, tableCtTpl, totalAcctDetailTpl, settleLang) {
var STLMTYPE_MAP = {
"0" : settleLang._("total-account-detail.stlmType.0"),
"1" : settleLang._("total-account-detail.stlmType.1"),
"2" : settleLang._("total-account-detail.stlmType.2")
};
var STAT_MAP = {
"0" : "审核通过",
"1" : "审核拒绝",
"2" : "初始化申请"
};
App.module('SettleApp.TotalAccountDetail.List.View', function(View, App, Backbone, Marionette, $, _) {
View.TotalAccountDetails = Marionette.ItemView.extend({
tabId: 'menu.total.account.detail',
template: tableCtTpl,
events: {
},
onRender: function() {
var me = this;
setTimeout(function() {
me.renderGrid();
},1);
},
getRequestDateSelect: function(gird) {
var data = {};
data['id'] = Opf.Grid.getSelRowId(gird);
var $form = $('form.form-total-account-detall').find(':input');
$form.each(function() {
data[$(this).attr('name')] = $(this).val();
});
return {
url: 'api/settle/total-account-details/' + data['id'] + '/update',
data: data,
type: "PUT",
contentType: "application/json",
dataType: "json",
needData: true
};
},
ajaxRequest: function(options, dialog, grid) {
$.ajax({
type: options.type,
contentType: options.contentType,
dataType: options.dataType,
url: options.url,
data: options.needData ? JSON.stringify(options.data) : "",
success: function(resp) {
console.log('审核结果:' + (resp.success || '') + (resp.msg || ''));
$(dialog).dialog("destroy");
$(grid).trigger("reloadGrid", [{current:true}]);
if(resp.success) {
Opf.Toast.success('操作成功');
}
},
error: function(resp) {
console.error(resp.msg || resp.success || resp);
$(dialog).dialog("destroy");
}
});
},
ajaxGetAccount: function(form, value, select){
$.ajax({
type: 'GET',
contentType: 'application/json',
dataType: 'json',
url: url._('stlm.account') + '/' + value + '/accountInfo',
success: function(resp) {
var $form = $(form),
$select = $(select);
$select.empty();
for(var i=0; i<resp.length; i++) {
var appendedData = '<option value="' + resp[i].key + '">' + resp[i].value + '</option>';
$select.append(appendedData);
}
},
error: function(resp){
console.error(resp);
}
});
},
attachValidation: function() {
return {
setupValidation: Opf.Validate.setup,
addValidateRules: function(form){
Opf.Validate.addRules(form, {
rules:{
stlmType:{
required: true
},
outAcctId:{
required: true
},
txAmt:{
required: true,
float: true
},
oprMsg:{
required: true,
maxlength: 300
}
}
});
}
};
},
onClickButton: function(roleGird) {
var me = this;
var tpl = totalAcctDetailTpl();
var $dialog = $(tpl).dialog({
autoOpen: true,
height: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.height'), //300,
width: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.width'), //350,
modal: true,
buttons: [{
html: "<i class='icon-ok'></i> 提交",
"class" : "btn btn-xs btn-primary",
click: function(e) {
var $form = $('form.form-total-account-detall');
var validator = $form.validate();
var valid = true;
if(validator && !validator.form()){
valid = false;
}
if(valid){
$($(e.target).closest('button')).addClass('disabled').find('span').html("<i class='icon-ok'></i> 正在提交...");
me.ajaxRequest(me.getRequestDateSelect(roleGird), this, roleGird);
}
}
}, {
html: "<i class='icon-remove'></i> 取消",
"class" : "btn btn-xs",
click: function() {
$( this ).dialog( "destroy" );
}
}],
create: function() {
Opf.Validate.addRules($('form.form-total-account-detall'), {
rules: {
oprMsg2: {
required: true,
maxlength: 300
}
}
});
$(this).prev('.ui-widget-header').find('.ui-dialog-title').addClass('settle-styles-paddingL-15');
},
close: function() {
$( this ).dialog( "destroy" );
}
});
},
renderGrid: function() {
var me = this;
var validation = this.attachValidation();
var roleGird = App.Factory.createJqGrid({
rsId:'totalAccountDetail',
caption: settleLang._('totalAccountDetail.txt'),
jsonReader: {
},
actionsCol: {
// width: Opf.Config._('ui', 'totalAccountDetail.grid.form.actionsCol.width'), // 130,
edit : false,
del: false,
extraButtons: [
{name: 'check', title:'审核', icon: 'icon-opf-verify icon-opf-verify-color', click: function() {
me.onClickButton(roleGird);
}}
],
canButtonRender: function(name, opts, rowData) {
// return true;
// 初始申请的情况下才可以显示审核按钮
if(name === 'check' && rowData.stat !== '2') {
return false;
}
}
},
nav: {
formSize: {
width: Opf.Config._('ui', 'totalAccountDetail.grid.form.width'),
height: Opf.Config._('ui', 'totalAccountDetail.grid.form.height')
},
add : {
beforeShowForm: function(form) {
validation.addValidateRules(form);
var $select = $(form).find('select[name="stlmType"]');
$select.on('change', function(){
var $acctSelect = $('#tr_outAcctId').find('select');
var $caption = $('#tr_outAcctId').find('.CaptionTD');
var value = $(this).val();
if (value === '0') {
$acctSelect.attr('name', 'outAcctId');
$caption.empty().append('付款账户');
| end('付款账户');
me.ajaxGetAccount(form, value, $acctSelect);
} else if (value === '2') {
$acctSelect.attr('name', 'inAcctId');
$caption.empty().append('收款账户');
me.ajaxGetAccount(form, value, $acctSelect);
}
});
$select.trigger('change');
},
beforeSubmit: validation.setupValidation
},
edit: {
beforeShowForm: function(form) {
validation.addValidateRules(form);
},
beforeSubmit: validation.setupValidation
},
view: {
width: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.width'),
height: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.height')
}
},
gid: 'total-account-details-grid',//innerly get corresponding ct '#total-account-details-grid-table' '#total-account-details-grid-pager'
url: url._('total.account.detail'),
colNames: {
id : settleLang._('total.account.detail.id'), //ID
stlmDate : settleLang._('total.account.detail.stlm.date'), //账务日期
stlmType : settleLang._('total.account.detail.stlm.type'), //账务维护类型
txAmt : settleLang._('total.account.detail.tx.amt'), //交易金额
realetiveAccount : settleLang._('total.account.detail.realetive.account'),
oprName : settleLang._('total.account.detail.opr.name'),
oprMsg : settleLang._('total.account.detail.opr.msg'), //操作描述
//add database message because the database had changed in 20140321
stat : settleLang._('total.account.detail.stat'), //状态
//add end
oprMsg2 : settleLang._('total.account.detail.opr.msg2'), //复审描述
oprName2 : settleLang._('total.account.detail.opr.name2'),
//add rows
outAcctNo : settleLang._('total.account.detail.out.acct.no'),
inAcctNo : settleLang._('total.account.detail.in.acct.no'),
//add end
//
outAcctId : settleLang._('total.account.detail.out.acct.id'), //付款账户Id
inAcctId : settleLang._('total.account.detail.in.acct.id'), //收款账户Id
oprId : settleLang._('total.account.detail.opr.id'), //操作员
oprId2 : settleLang._('total.account.detail.opr.id2'), //复审员
recOprTime : settleLang._('total.account.detail.rec.opr.time'), //操作时间
recOprTime2 : settleLang._('total.account.detail.rec.opr.time2') //复审时间
},
responsiveOptions: {
hidden: {
ss: ['realetiveAccount', 'oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
xs: ['oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
sm: ['stat', 'oprMsg2', 'oprName2'],
md: ['oprMsg2', 'oprName2'],
ld: []
}
},
colModel: [
{ name: 'id', index: 'id', editable: false, hidden: true }, //ID
{ name: 'stlmDate', index: 'stlmDate', width: 100, search: true, editable: false,
searchoptions: {
dataInit : function (elem) {
$(elem).datepicker( {autoclose: true, format: "yyyymmdd"} );
},
sopt: ['eq', 'ne', 'lt', 'le', 'gt', 'ge']
}
}, //账务日期
{name: 'stlmType', index: 'stlmType', width: 90, search:true, editable: true, formatter: stlmTypeFormatter,
stype: 'select',
searchoptions: {
value: STLMTYPE_MAP,
sopt: ['eq','ne']
},
edittype:'select',
editoptions: {
value: STLMTYPE_MAP
}
}, //账务维护类型
{name: 'txAmt', index: 'txAmt', search:true, editable: true, formatter: Opf.currencyFormatter,
_searchType:'num'
}, //交易金额
{name: 'realetiveAccount', index: 'realetiveAccount', width:250, search:false,editable:false, formatter : function(val, options, obj){
var account = obj.outAcctNo || obj.inAcctNo || '';
return account;
}},
{name: 'oprName', index: 'oprName', search:true, width: 90, editable:false,
_searchType:'string'
},
{name: 'oprMsg', index: 'oprMsg', search:false,editable: true, edittype: 'textarea'}, //操作描述
//add database message because the database had changed in 20140321
{name: 'stat', index: 'stat', search:true, editable:false, formatter: statFormatter,
stype: 'select',
searchoptions: {
value: STAT_MAP,
sopt: ['eq','ne']
}
},//状态
//add end
{name: 'oprMsg2', index: 'oprMsg2', search:false,editable: false}, //复审描述
{name: 'oprId2', index: 'oprId2', search:false,editable: false, hidden: true, viewable: false}, //复审员
//
//add rows
{name: 'outAcctNo', index: 'outAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
searchoptions: {
sopt: ['eq']
}*/
},
{name: 'inAcctNo', index: 'inAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
searchoptions: {
sopt: ['eq']
}*/
},
{name: 'oprName2', index: 'oprName2', search:true, width: 90, editable:false,
_searchType:'string'
},
//add end
//
{name: 'outAcctId', index: 'outAcctId', search:false,editable: true, hidden: true, viewable: false, edittype: 'select'}, //付款账户Id
{name: 'inAcctId', index: 'inAcctId', search:false,editable: false, hidden: true, viewable: false}, //收款账户Id
{name: 'oprId', index: 'oprId', search:false,editable: false, hidden: true, viewable: false}, //操作员
{name: 'recOprTime', index: 'recOprTime', search: false, editable: false, hidden:true,
editoptions: {
dataInit : function (elem) {
$(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
.on("changeDate changeMonth changeYear", function(oDate) {
$(oDate.target).valid();
});
}
}
}, //操作时间
{name: 'recOprTime2', index: 'recOprTime2', search: false, editable: false, hidden:true,
editoptions: {
dataInit : function (elem) {
$(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
.on("changeDate changeMonth changeYear", function(oDate) {
$(oDate.target).valid();
});
}
}
} //复审时间
],
loadComplete: function() {}
});
}
});
});
function stlmTypeFormatter (val) {
return STLMTYPE_MAP[val];
}
function statFormatter (val) {
return STAT_MAP[val];
}
return App.SettleApp.TotalAccountDetail.List.View;
}); | me.ajaxGetAccount(form, value, $acctSelect);
} else if (value === '1') {
$acctSelect.attr('name', 'outAcctId');
$caption.empty().app | conditional_block |
list-view.js | /**
* @created 2014-3-12 19:27:29
*/
define(['App',
'tpl!app/oms/settle/total-account-detail/list/templates/table-ct.tpl',
'tpl!app/oms/settle/total-account-detail/list/templates/total-account-detail.tpl',
'i18n!app/oms/common/nls/settle',
'jquery.jqGrid',
'jquery.validate',
'bootstrap-datepicker'
], function(App, tableCtTpl, totalAcctDetailTpl, settleLang) {
var STLMTYPE_MAP = {
"0" : settleLang._("total-account-detail.stlmType.0"),
"1" : settleLang._("total-account-detail.stlmType.1"),
"2" : settleLang._("total-account-detail.stlmType.2")
};
var STAT_MAP = {
"0" : "审核通过",
"1" : "审核拒绝",
"2" : "初始化申请"
};
App.module('SettleApp.TotalAccountDetail.List.View', function(View, App, Backbone, Marionette, $, _) {
View.TotalAccountDetails = Marionette.ItemView.extend({
tabId: 'menu.total.account.detail',
template: tableCtTpl,
events: {
},
onRender: function() {
var me = this;
setTimeout(function() {
me.renderGrid();
},1);
},
getRequestDateSelect: function(gird) {
var data = {};
data['id'] = Opf.Grid.getSelRowId(gird);
var $form = $('form.form-total-account-detall').find(':input');
$form.each(function() {
data[$(this).attr('name')] = $(this).val();
});
return {
url: 'api/settle/total-account-details/' + data['id'] + '/update',
data: data,
type: "PUT",
contentType: "application/json",
dataType: "json",
needData: true
};
},
ajaxRequest: function(options, dialog, grid) {
$.ajax({
type: options.type,
contentType: options.contentType,
dataType: options.dataType,
url: options.url,
data: options.needData ? JSON.stringify(options.data) : "",
success: function(resp) {
console.log('审核结果:' + (resp.success || '') + (resp.msg || ''));
$(dialog).dialog("destroy");
$(grid).trigger("reloadGrid", [{current:true}]);
if(resp.success) {
Opf.Toast.success('操作成功');
}
},
error: function(resp) {
console.error(resp.msg || resp.success || resp);
$(dialog).dialog("destroy");
}
});
},
ajaxGetAccount: function(form, value, select){
$.ajax({
type: 'GET',
contentType: 'application/json',
dataType: 'json',
url: url._('stlm.account') + '/' + value + '/accountInfo',
success: function(resp) {
var $form = $(form),
$select = $(select);
$select.empty();
for(var i=0; i<resp.length; i++) {
var appendedData = '<option value="' + resp[i].key + '">' + resp[i].value + '</option>';
$select.append(appendedData);
}
},
error: function(resp){
console.error(resp);
}
});
},
attachValidation: function() {
return {
setupValidation: Opf.Validate.setup,
addValidateRules: function(form){
Opf.Validate.addRules(form, {
rules:{
stlmType:{
required: true
},
outAcctId:{
required: true
},
txAmt:{
required: true,
float: true
},
oprMsg:{
required: true,
maxlength: 300
}
}
});
}
};
},
onClickButton: function(roleGird) {
var me = this;
var tpl = totalAcctDetailTpl();
var $dialog = $(tpl).dialog({
autoOpen: true,
height: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.height'), //300,
width: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.width'), //350,
modal: true,
buttons: [{
html: "<i class='icon-ok'></i> 提交",
"class" : "btn btn-xs btn-primary",
click: function(e) {
var $form = $('form.form-total-account-detall');
var validator = $form.validate();
var valid = true;
if(validator && !validator.form()){
valid = false;
}
if(valid){
$($(e.target).closest('button')).addClass('disabled').find('span').html("<i class='icon-ok'></i> 正在提交...");
me.ajaxRequest(me.getRequestDateSelect(roleGird), this, roleGird);
}
}
}, {
html: "<i class='icon-remove'></i> 取消",
"class" : "btn btn-xs",
click: function() {
$( this ).dialog( "destroy" );
}
}],
create: function() {
Opf.Validate.addRules($('form.form-total-account-detall'), {
rules: {
oprMsg2: {
required: true,
maxlength: 300
}
}
});
$(this).prev('.ui-widget-header').find('.ui-dialog-title').addClass('settle-styles-paddingL-15');
},
close: function() {
$( this ).dialog( "destroy" );
}
});
},
renderGrid: function() {
var me = this;
var validation = this.attachValidation();
var roleGird = App.Factory.createJqGrid({
rsId:'totalAccountDetail',
caption: settleLang._('totalAccountDetail.txt'),
jsonReader: {
},
actionsCol: {
// width: Opf.Config._('ui', 'totalAccountDetail.grid.form.actionsCol.width'), // 130,
edit : false,
del: false,
extraButtons: [
{name: 'check', title:'审核', icon: 'icon-opf-verify icon-opf-verify-color', click: function() {
me.onClickButton(roleGird);
}}
],
canButtonRender: function(name, opts, rowData) {
// return true;
// 初始申请的情况下才可以显示审核按钮
if(name === 'check' && rowData.stat !== '2') {
return false;
}
}
},
nav: {
formSize: {
width: Opf.Config._('ui', 'totalAccountDetail.grid.form.width'),
height: Opf.Config._('ui', 'totalAccountDetail.grid.form.height')
},
add : {
beforeShowForm: function(form) {
validation.addValidateRules(form);
var $select = $(form).find('select[name="stlmType"]');
$select.on('change', function(){
var $acctSelect = $('#tr_outAcctId').find('select');
var $caption = $('#tr_outAcctId').find('.CaptionTD');
var value = $(this).val();
if (value === '0') {
$acctSelect.attr('name', 'outAcctId');
$caption.empty().append('付款账户');
me.ajaxGetAccount(form, value, $acctSelect);
} else if (value === '1') {
$acctSelect.attr('name', 'outAcctId');
$caption.empty().append('付款账户');
me.ajaxGetAccount(form, value, $acctSelect);
} else if (value === '2') {
$acctSelect.attr('name', 'inAcctId');
$caption.empty().append('收款账户');
me.ajaxGetAccount(form, value, $acctSelect);
}
});
$select.trigger('change');
},
beforeSubmit: validation.setupValidation
},
edit: {
beforeShowForm: function(form) {
validation.addValidateRules(form);
},
beforeSubmit: validation.setupValidation
},
view: {
width: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.width'),
height: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.height')
}
},
gid: 'total-account-details-grid',//innerly get corresponding ct '#total-account-details-grid-table' '#total-account-details-grid-pager'
url: url._('total.account.detail'),
colNames: {
id : settleLang._('total.account.detail.id'), //ID
stlmDate : settleLang._('total.account.detail.stlm.date'), //账务日期
stlmType : settleLang._('total.account.detail.stlm.type'), //账务维护类型
txAmt : settleLang._('total.account.detail.tx.amt'), //交易金额
realetiveAccount : settleLang._('total.account.detail.realetive.account'),
oprName : settleLang._('total.account.detail.opr.name'),
oprMsg : settleLang._('total.account.detail.opr.msg'), //操作描述
//add database message because the database had changed in 20140321
stat : settleLang._('total.account.detail.stat'), //状态
//add end
oprMsg2 : settleLang._('total.account.detail.opr.msg2'), //复审描述
oprName2 : settleLang._('total.account.detail.opr.name2'),
//add rows
outAcctNo : settleLang._('total.account.detail.out.acct.no'),
inAcctNo : settleLang._('total.account.detail.in.acct.no'),
//add end
//
outAcctId : settleLang._('total.account.detail.out.acct.id'), //付款账户Id
inAcctId : settleLang._('total.account.detail.in.acct.id'), //收款账户Id
oprId : settleLang._('total.account.detail.opr.id'), //操作员
oprId2 : settleLang._('total.account.detail.opr.id2'), //复审员
recOprTime : settleLang._('total.account.detail.rec.opr.time'), //操作时间
recOprTime2 : settleLang._('total.account.detail.rec.opr.time2') //复审时间
},
responsiveOptions: {
hidden: {
ss: ['realetiveAccount', 'oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
xs: ['oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
sm: ['stat', 'oprMsg2', 'oprName2'],
md: ['oprMsg2', 'oprName2'],
ld: []
}
},
colModel: [
{ name: 'id', index: 'id', editable: false, hidden: true }, //ID
{ name: 'stlmDate', index: 'stlmDate', width: 100, search: true, editable: false,
searchoptions: {
dataInit : function (elem) {
$(elem).datepicker( {autoclose: true, format: "yyyymmdd"} );
},
sopt: ['eq', 'ne', 'lt', 'le', 'gt', 'ge']
}
}, //账务日期
{name: 'stlmType', index: 'stlmType', width: 90, search:true, editable: true, formatter: stlmTypeFormatter,
stype: 'select',
searchoptions: {
value: STLMTYPE_MAP,
sopt: ['eq','ne']
},
edittype:'select',
editoptions: {
value: STLMTYPE_MAP
}
}, //账务维护类型
{name: 'txAmt', index: 'txAmt', search:true, editable: true, formatter: Opf.currencyFormatter,
_searchType:'num'
}, //交易金额
{name: 'realetiveAccount', index: 'realetiveAccount', width:250, search:false,editable:false, formatter : function(val, options, obj){
var account = obj.outAcctNo || obj.inAcctNo || '';
return account;
}},
{name: 'oprName', index: 'oprName', search:true, width: 90, editable:false,
_searchType:'string'
},
{name: 'oprMsg', index: 'oprMsg', search:false,editable: true, edittype: 'textarea'}, //操作描述
//add database message because the database had changed in 20140321
{name: 'stat', index: 'stat', search:true, editable:false, formatter: statFormatter,
stype: 'select',
searchoptions: {
value: STAT_MAP,
sopt: ['eq','ne']
}
},//状态
//add end
{name: 'oprMsg2', index: 'oprMsg2', search:false,editable: false}, //复审描述
{name: 'oprId2', index: 'oprId2', search:false,editable: false, hidden: true, viewable: false}, //复审员
//
//add rows
{name: 'outAcctNo', index: 'outAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
searchoptions: {
sopt: ['eq']
}*/
},
{name: 'inAcctNo', index: 'inAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
searchoptions: {
sopt: ['eq']
}*/
},
{name: 'oprName2', index: 'oprName2', search:true, width: 90, editable:false,
_searchType:'string'
},
//add end
//
{name: 'outAcctId', index: 'outAcctId', search:false,editable: true, hidden: true, viewable: false, edittype: 'select'}, //付款账户Id
{name: 'inAcctId', index: 'inAcctId', search:false,editable: false, hidden: true, viewable: false}, //收款账户Id
{name: 'oprId', index: 'oprId', search:false,editable: false, hidden: true, viewable: false}, //操作员
{name: 'recOprTime', index: 'recOprTime', search: false, editable: false, hidden:true,
editoptions: {
dataInit : function (elem) {
$(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
.on("changeDate changeMonth changeYear", function(oDate) {
$(oDate.target).valid();
});
}
}
}, //操作时间
{name: 'recOprTime2', index: 'recOprTime2', search: false, editable: false, hidden:true,
editoptions: {
dataInit : function (elem) {
$(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
.on("changeDate changeMonth changeYear", function(oDate) {
$(oDate.target).valid();
});
}
}
} //复审时间
],
loadComplete: function() {}
});
}
});
});
function stlmTypeFormatter (val) {
return STLMTYPE_MAP[val];
}
function statFormatter (val) {
return STAT_MAP[val];
}
return App.SettleApp.TotalAccountDetail.List.View;
}); | identifier_body | ||
list-view.js | /**
* @created 2014-3-12 19:27:29
*/
define(['App',
'tpl!app/oms/settle/total-account-detail/list/templates/table-ct.tpl',
'tpl!app/oms/settle/total-account-detail/list/templates/total-account-detail.tpl',
'i18n!app/oms/common/nls/settle',
'jquery.jqGrid',
'jquery.validate',
'bootstrap-datepicker'
], function(App, tableCtTpl, totalAcctDetailTpl, settleLang) {
var STLMTYPE_MAP = {
"0" : settleLang._("total-account-detail.stlmType.0"),
"1" : settleLang._("total-account-detail.stlmType.1"),
"2" : settleLang._("total-account-detail.stlmType.2")
};
var STAT_MAP = {
"0" : "审核通过",
"1" : "审核拒绝",
"2" : "初始化申请"
};
App.module('SettleApp.TotalAccountDetail.List.View', function(View, App, Backbone, Marionette, $, _) {
View.TotalAccountDetails = Marionette.ItemView.extend({
tabId: 'menu.total.account.detail',
template: tableCtTpl,
events: {
},
onRender: function() {
var me = this;
setTimeout(function() {
me.renderGrid();
},1);
},
getRequestDateSelect: function(gird) {
var data = {};
data['id'] = Opf.Grid.getSelRowId(gird);
var $form = $('form.form-total-account-detall').find(':input');
$form.each(function() {
data[$(this).attr('name')] = $(this).val();
});
return {
url: 'api/settle/total-account-details/' + data['id'] + '/update',
data: data,
type: "PUT",
contentType: "application/json",
dataType: "json",
needData: true
};
},
ajaxRequest: function(options, dialog, grid) {
$.ajax({
type: options.type,
contentType: options.contentType,
dataType: options.dataType,
url: options.url,
data: options.needData ? JSON.stringify(options.data) : "",
success: function(resp) {
console.log('审核结果:' + (resp.success || '') + (resp.msg || ''));
$(dialog).dialog("destroy");
$(grid).trigger("reloadGrid", [{current:true}]);
if(resp.success) {
Opf.Toast.success('操作成功');
}
},
error: function(resp) {
console.error(resp.msg || resp.success || resp);
$(dialog).dialog("destroy");
}
});
},
ajaxGetAccount: function(form, value, select){
$.ajax({
type: 'GET',
contentType: 'application/json',
dataType: 'json',
url: url._('stlm.account') + '/' + value + '/accountInfo',
success: function(resp) {
var $form = $(form),
$select = $(select);
$select.empty();
for(var i=0; i<resp.length; i++) {
var appendedData = '<option value="' + resp[i].key + '">' + resp[i].value + '</option>';
$select.append(appendedData);
}
},
error: function(resp){
console.error(resp);
}
});
},
attachValidation: function() {
return {
setupValidation: Opf.Validate.setup,
addValidateRules: function(form){
Opf.Validate.addRules(form, {
rules:{
stlmType:{
required: true
},
outAcctId:{
required: true
},
txAmt:{
required: true,
float: true
},
oprMsg:{
required: true,
maxlength: 300
}
}
});
}
};
},
// Open the audit confirmation dialog for the selected row. On confirm it
// validates the form, disables the submit button, and sends the update via
// ajaxRequest (which also destroys the dialog and reloads the grid).
onClickButton: function(roleGird) {
    var me = this;
    var tpl = totalAcctDetailTpl();
    var $dialog = $(tpl).dialog({
        autoOpen: true,
        height: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.height'), //300,
        width: Opf.Config._('ui', 'totalAccountDetail.grid.form.extra.width'), //350,
        modal: true,
        buttons: [{
            html: "<i class='icon-ok'></i> 提交",
            "class" : "btn btn-xs btn-primary",
            click: function(e) {
                var $form = $('form.form-total-account-detall');
                var validator = $form.validate();
                var valid = true;
                if(validator && !validator.form()){
                    valid = false;
                }
                if(valid){
                    // disable the button and show progress text while the request runs
                    $($(e.target).closest('button')).addClass('disabled').find('span').html("<i class='icon-ok'></i> 正在提交...");
                    // NOTE(review): `this` here appears to be the dialog element; it is
                    // passed so ajaxRequest can destroy it -- confirm against jQuery UI docs
                    me.ajaxRequest(me.getRequestDateSelect(roleGird), this, roleGird);
                }
            }
        }, {
            html: "<i class='icon-remove'></i> 取消",
            "class" : "btn btn-xs",
            click: function() {
                $( this ).dialog( "destroy" );
            }
        }],
        create: function() {
            // extra rule for the reviewer-comment field shown in this dialog
            Opf.Validate.addRules($('form.form-total-account-detall'), {
                rules: {
                    oprMsg2: {
                        required: true,
                        maxlength: 300
                    }
                }
            });
            $(this).prev('.ui-widget-header').find('.ui-dialog-title').addClass('settle-styles-paddingL-15');
        },
        close: function() {
            $( this ).dialog( "destroy" );
        }
    });
},
// Build and render the jqGrid listing total-account maintenance details,
// including the per-row audit button, add/edit form validation wiring,
// column definitions and responsive column hiding.
renderGrid: function() {
    var me = this;
    var validation = this.attachValidation();
    var roleGird = App.Factory.createJqGrid({
        rsId:'totalAccountDetail',
        caption: settleLang._('totalAccountDetail.txt'),
        jsonReader: {
        },
        actionsCol: {
            // width: Opf.Config._('ui', 'totalAccountDetail.grid.form.actionsCol.width'), // 130,
            edit : false,
            del: false,
            extraButtons: [
                {name: 'check', title:'审核', icon: 'icon-opf-verify icon-opf-verify-color', click: function() {
                    me.onClickButton(roleGird);
                }}
            ],
            canButtonRender: function(name, opts, rowData) {
                // return true;
                // only show the audit button while the record is in its initial-request state
                if(name === 'check' && rowData.stat !== '2') {
                    return false;
                }
            }
        },
        nav: {
            formSize: {
                width: Opf.Config._('ui', 'totalAccountDetail.grid.form.width'),
                height: Opf.Config._('ui', 'totalAccountDetail.grid.form.height')
            },
            add : {
                beforeShowForm: function(form) {
                    validation.addValidateRules(form);
                    // swap the account select between payer/payee depending on the
                    // chosen settlement type, then load the matching account list
                    var $select = $(form).find('select[name="stlmType"]');
                    $select.on('change', function(){
                        var $acctSelect = $('#tr_outAcctId').find('select');
                        var $caption = $('#tr_outAcctId').find('.CaptionTD');
                        var value = $(this).val();
                        if (value === '0') {
                            $acctSelect.attr('name', 'outAcctId');
                            $caption.empty().append('付款账户');
                            me.ajaxGetAccount(form, value, $acctSelect);
                        } else if (value === '1') {
                            $acctSelect.attr('name', 'outAcctId');
                            $caption.empty().append('付款账户');
                            me.ajaxGetAccount(form, value, $acctSelect);
                        } else if (value === '2') {
                            $acctSelect.attr('name', 'inAcctId');
                            $caption.empty().append('收款账户');
                            me.ajaxGetAccount(form, value, $acctSelect);
                        }
                    });
                    $select.trigger('change');
                },
                beforeSubmit: validation.setupValidation
            },
            edit: {
                beforeShowForm: function(form) {
                    validation.addValidateRules(form);
                },
                beforeSubmit: validation.setupValidation
            },
            view: {
                width: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.width'),
                height: Opf.Config._('ui', 'totalAccountDetail.grid.viewform.height')
            }
        },
        gid: 'total-account-details-grid',//innerly get corresponding ct '#total-account-details-grid-table' '#total-account-details-grid-pager'
        url: url._('total.account.detail'),
        colNames: {
            id : settleLang._('total.account.detail.id'), //ID
            stlmDate : settleLang._('total.account.detail.stlm.date'), //accounting date
            stlmType : settleLang._('total.account.detail.stlm.type'), //account maintenance type
            txAmt : settleLang._('total.account.detail.tx.amt'), //transaction amount
            realetiveAccount : settleLang._('total.account.detail.realetive.account'),
            oprName : settleLang._('total.account.detail.opr.name'),
            oprMsg : settleLang._('total.account.detail.opr.msg'), //operation description
            //add database message because the database had changed in 20140321
            stat : settleLang._('total.account.detail.stat'), //status
            //add end
            oprMsg2 : settleLang._('total.account.detail.opr.msg2'), //review description
            oprName2 : settleLang._('total.account.detail.opr.name2'),
            //add rows
            outAcctNo : settleLang._('total.account.detail.out.acct.no'),
            inAcctNo : settleLang._('total.account.detail.in.acct.no'),
            //add end
            //
            outAcctId : settleLang._('total.account.detail.out.acct.id'), //payer account id
            inAcctId : settleLang._('total.account.detail.in.acct.id'), //payee account id
            oprId : settleLang._('total.account.detail.opr.id'), //operator
            oprId2 : settleLang._('total.account.detail.opr.id2'), //reviewer
            recOprTime : settleLang._('total.account.detail.rec.opr.time'), //operation time
            recOprTime2 : settleLang._('total.account.detail.rec.opr.time2') //review time
        },
        responsiveOptions: {
            // columns hidden at each responsive breakpoint (smallest screens hide most)
            hidden: {
                ss: ['realetiveAccount', 'oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
                xs: ['oprName', 'oprMsg', 'stat', 'oprMsg2', 'oprName2'],
                sm: ['stat', 'oprMsg2', 'oprName2'],
                md: ['oprMsg2', 'oprName2'],
                ld: []
            }
        },
        colModel: [
            { name: 'id', index: 'id', editable: false, hidden: true }, //ID
            { name: 'stlmDate', index: 'stlmDate', width: 100, search: true, editable: false,
                searchoptions: {
                    dataInit : function (elem) {
                        $(elem).datepicker( {autoclose: true, format: "yyyymmdd"} );
                    },
                    sopt: ['eq', 'ne', 'lt', 'le', 'gt', 'ge']
                }
            }, //accounting date
            {name: 'stlmType', index: 'stlmType', width: 90, search:true, editable: true, formatter: stlmTypeFormatter,
                stype: 'select',
                searchoptions: {
                    value: STLMTYPE_MAP,
                    sopt: ['eq','ne']
                },
                edittype:'select',
                editoptions: {
                    value: STLMTYPE_MAP
                }
            }, //account maintenance type
            {name: 'txAmt', index: 'txAmt', search:true, editable: true, formatter: Opf.currencyFormatter,
                _searchType:'num'
            }, //transaction amount
            // derived column: show whichever of payer/payee account number is present
            {name: 'realetiveAccount', index: 'realetiveAccount', width:250, search:false,editable:false, formatter : function(val, options, obj){
                var account = obj.outAcctNo || obj.inAcctNo || '';
                return account;
            }},
            {name: 'oprName', index: 'oprName', search:true, width: 90, editable:false,
                _searchType:'string'
            },
            {name: 'oprMsg', index: 'oprMsg', search:false,editable: true, edittype: 'textarea'}, //operation description
            //add database message because the database had changed in 20140321
            {name: 'stat', index: 'stat', search:true, editable:false, formatter: statFormatter,
                stype: 'select',
                searchoptions: {
                    value: STAT_MAP,
                    sopt: ['eq','ne']
                }
            },//status
            //add end
            {name: 'oprMsg2', index: 'oprMsg2', search:false,editable: false}, //review description
            {name: 'oprId2', index: 'oprId2', search:false,editable: false, hidden: true, viewable: false}, //reviewer
            //
            //add rows
            {name: 'outAcctNo', index: 'outAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
                searchoptions: {
                    sopt: ['eq']
                }*/
            },
            {name: 'inAcctNo', index: 'inAcctNo', search:false,editable:false, hidden : true, viewable: false/*,
                searchoptions: {
                    sopt: ['eq']
                }*/
            },
            {name: 'oprName2', index: 'oprName2', search:true, width: 90, editable:false,
                _searchType:'string'
            },
            //add end
            //
            {name: 'outAcctId', index: 'outAcctId', search:false,editable: true, hidden: true, viewable: false, edittype: 'select'}, //payer account id
            {name: 'inAcctId', index: 'inAcctId', search:false,editable: false, hidden: true, viewable: false}, //payee account id
            {name: 'oprId', index: 'oprId', search:false,editable: false, hidden: true, viewable: false}, //operator
            {name: 'recOprTime', index: 'recOprTime', search: false, editable: false, hidden:true,
                editoptions: {
                    dataInit : function (elem) {
                        $(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
                            .on("changeDate changeMonth changeYear", function(oDate) {
                                $(oDate.target).valid();
                            });
                    }
                }
            }, //operation time
            {name: 'recOprTime2', index: 'recOprTime2', search: false, editable: false, hidden:true,
                editoptions: {
                    dataInit : function (elem) {
                        $(elem).datepicker({ autoclose: true, format: 'yyyymmdd' })
                            .on("changeDate changeMonth changeYear", function(oDate) {
                                $(oDate.target).valid();
                            });
                    }
                }
            } //review time
        ],
        loadComplete: function() {}
    });
}
});
});
// jqGrid formatter: map the settlement-type code to its display label.
function stlmTypeFormatter (val) {
    return STLMTYPE_MAP[val];
}
// jqGrid formatter: map the status code to its display label.
function statFormatter (val) {
    return STAT_MAP[val];
}
return App.SettleApp.TotalAccountDetail.List.View;
}); | identifier_name | ||
ACPF_ExportMuRaster.py | # SSURGO_ExportMuRaster.py
#
# Convert MUPOLYGON featureclass to raster for the specified SSURGO geodatabase.
# By default any small NoData areas (< 5000 sq meters) will be filled using
# the Majority value.
#
# Input mupolygon featureclass must have a projected coordinate system or it will skip.
# Input databases and featureclasses must use naming convention established by the
# 'SDM Export By State' tool.
#
# For geographic regions that have USGS NLCD available, the tool wil automatically
# align the coordinate system and raster grid to match.
#
# 10-31-2013 Added gap fill method
#
# 11-05-2014
# 11-22-2013
# 12-10-2013 Problem with using non-unique cellvalues for raster. Going back to
# creating an integer version of MUKEY in the mapunit polygon layer.
# 12-13-2013 Occasionally see error messages related to temporary GRIDs (g_g*) created
# under "C:\Users\steve.peaslee\AppData\Local\Temp\a subfolder". These
# are probably caused by orphaned INFO tables.
# 01-08-2014 Added basic raster metadata (still need process steps)
# 01-12-2014 Restricted conversion to use only input MUPOLYGON featureclass having
# a projected coordinate system with linear units=Meter
# 01-31-2014 Added progressor bar to 'Saving MUKEY values..'. Seems to be a hangup at this
# point when processing CONUS geodatabase
# 02-14-2014 Changed FeatureToLayer (CELL_CENTER) to PolygonToRaster (MAXIMUM_COMBINED_AREA)
# and removed the Gap Fill option.
# 2014-09-27 Added ISO metadata import
#
# 2014-10-18 Noticed that failure to create raster seemed to be related to long
# file names or non-alphanumeric characters such as a dash in the name.
#
# 2014-10-29 Removed ORDER BY MUKEY sql clause because some computers were failing on that line.
# Don't understand why.
#
# 2014-10-31 Added error message if the MUKEY column is not populated in the MUPOLYGON featureclass
#
# 2014-11-04 Problems occur when the user's gp environment points to Default.gdb for the scratchWorkpace.
# Added a fatal error message when that occurs.
#
# 2015-01-15 Hopefully fixed some of the issues that caused the raster conversion to crash at the end.
# Cleaned up some of the current workspace settings and moved the renaming of the final raster.
#
# 2015-02-26 Adding option for tiling raster conversion by areasymbol and then mosaicing. Slower and takes
# more disk space, but gets the job done when otherwise PolygonToRaster fails on big datasets.
# 2015-02-27 Make bTiling variable an integer (0, 2, 5) that can be used to slice the areasymbol value. This will
# give the user an option to tile by state (2) or by survey area (5)
# 2015-03-10 Moved sequence of CheckInExtension. It was at the beginning which seems wrong.
#
# 2015-03-11 Switched tiled raster format from geodatabase raster to TIFF. This should allow the entire
# temporary folder to be deleted instead of deleting rasters one-at-a-time (slow).
# 2015-03-11 Added attribute index (mukey) to raster attribute table
# 2015-03-13 Modified output raster name by incorporating the geodatabase name (after '_' and before ".gdb")
#
# 2015-09-16 Temporarily renamed output raster using a shorter string
#
# 2015-09-16 Trying several things to address 9999 failure on CONUS. Created a couple of ArcInfo workspace in temp
# 2015-09-16 Compacting geodatabase before PolygonToRaster conversion
#
# 2015-09-18 Still having problems with CONUS raster even with ArcGIS 10.3. Even the tiled method failed once
# on AR105. Actually may have been the next survey, but random order so don't know which one for sure.
# Trying to reorder mosaic to match the spatial order of the polygon layers. Need to figure out if
# the 99999 error in PolygonToRaster is occurring with the same soil survey or same count or any
# other pattern.
#
# 2015-09-18 Need to remember to turn off all layers in ArcMap. Redraw is triggered after each tile.
#
# 2015-10-01 Found problem apparently caused by 10.3. SnapRaster functionality was failing with tiles because of
# MakeFeatureLayer where_clause. Perhaps due to cursor lock persistence? Rewrote entire function to
# use SAPOLYGON featureclass to define extents for tiles. This seems to be working better anyway.
#
# 2015-10-02 Need to look at some method for sorting the extents of each tile and sort them in a geographic fashion.
# A similar method was used in the Create gSSURGO database tools for the Append process.
#
# 2015-10-23 Jennifer and I finally figured out what was causing her PolygonToRaster 9999 errors.
# It was dashes in the output GDB path. Will add a check for bad characters in path.
#
# 2015-10-26 Changed up SnapToNLCD function to incorporate SnapRaster input as long as the coordinate
# system matches and the extent coordinates are integer (no floating point!).
#
# 2015-10-27 Looking at possible issue with batchmode processing of rasters. Jennifer had several
# errors when trying to run all states at once.
#
# 2015-11-03 Fixed failure when indexing non-geodatabase rasters such as .IMG.
## ===================================================================================
class MyError(Exception):
    # Custom exception carrying a pre-formatted, user-facing error message;
    # handlers catch it and print the text via PrintMsg with severity 2.
    pass
## ===================================================================================
def PrintMsg(msg, severity=0):
    """Forward a message to the geoprocessor, one GP message per line.

    Splits msg on newlines so multi-line text produces one geoprocessing
    message each.  severity 0 -> AddMessage, 1 -> AddWarning, 2 -> a blank
    AddMessage followed by AddError.  Any failure is silently swallowed so
    logging can never break the caller.
    """
    try:
        for line in msg.split('\n'):
            if severity == 0:
                arcpy.AddMessage(line)
            elif severity == 1:
                arcpy.AddWarning(line)
            elif severity == 2:
                arcpy.AddMessage(" ")
                arcpy.AddError(line)
    except:
        pass
## ===================================================================================
def errorMsg():
    """Report the currently-handled exception (traceback + type + value)
    through PrintMsg at error severity.

    Fix: the original read sys.exc_type / sys.exc_value, which are
    long-deprecated module attributes (removed in Python 3); unpacking
    sys.exc_info() is the supported equivalent and works identically here.
    """
    try:
        exc_type, exc_value, tb = sys.exc_info()
        tbinfo = traceback.format_tb(tb)[0]
        theMsg = tbinfo + "\n" + str(exc_type) + ": " + str(exc_value)
        PrintMsg(theMsg, 2)
    except:
        # never let the error reporter itself raise
        PrintMsg("Unhandled error in errorMsg method", 2)
        pass
## ===================================================================================
def WriteToLog(theMsg, theRptFile):
    """Append theMsg (preceded by a newline) to the report file theRptFile.

    Fix: use a with-statement so the file handle is closed even if the
    write fails; the original leaked the handle on a write error.
    Errors are reported via errorMsg() rather than raised, as before.
    """
    try:
        with open(theRptFile, "a") as fh:
            fh.write("\n" + theMsg)
    except:
        errorMsg()
## ===================================================================================
def elapsedTime(start):
    """Return a human-readable elapsed-time string since 'start' (a
    time.time() value), e.g. "1 hour 2 minutes 5.0 seconds " (note the
    trailing space, preserved for callers that concatenate).

    Returns "" if anything goes wrong (reported via errorMsg).

    Fix: the singular-seconds comparison used "1.00", which a "%.1f"
    format can never produce, so "1.0 second" was unreachable; compare
    against "1.0" instead.
    """
    try:
        # Stop timer
        end = time.time()

        # Calculate total elapsed seconds
        eTotal = end - start

        # day = 86400 seconds, hour = 3600 seconds, minute = 60 seconds
        eMsg = ""

        # calculate elapsed days (integer part) and the fractional remainder
        eDay1 = eTotal / 86400
        eDay2 = math.modf(eDay1)
        eDay = int(eDay2[1])
        eDayR = eDay2[0]

        if eDay > 1:
            eMsg = eMsg + str(eDay) + " days "
        elif eDay == 1:
            eMsg = eMsg + str(eDay) + " day "

        # calculate elapsed hours from the day remainder
        eHour1 = eDayR * 24
        eHour2 = math.modf(eHour1)
        eHour = int(eHour2[1])
        eHourR = eHour2[0]

        if eDay > 0 or eHour > 0:
            if eHour > 1:
                eMsg = eMsg + str(eHour) + " hours "
            else:
                eMsg = eMsg + str(eHour) + " hour "

        # calculate elapsed minutes from the hour remainder
        eMinute1 = eHourR * 60
        eMinute2 = math.modf(eMinute1)
        eMinute = int(eMinute2[1])
        eMinuteR = eMinute2[0]

        if eDay > 0 or eHour > 0 or eMinute > 0:
            if eMinute > 1:
                eMsg = eMsg + str(eMinute) + " minutes "
            else:
                eMsg = eMsg + str(eMinute) + " minute "

        # calculate elapsed seconds; "%.1f" yields one decimal place
        eSeconds = "%.1f" % (eMinuteR * 60)

        # bug fix: was compared against "1.00", which "%.1f" never produces
        if eSeconds == "1.0":
            eMsg = eMsg + eSeconds + " second "
        else:
            eMsg = eMsg + eSeconds + " seconds "

        return eMsg

    except:
        errorMsg()
        return ""
## ===================================================================================
def Number_Format(num, places=0, bCommas=True):
    """Format a number with the given decimal places according to the
    current locale; bCommas enables locale digit grouping.

    Returns the formatted string, or False on error (reported via errorMsg).

    Fix: locale.format() was deprecated and removed in Python 3.12;
    locale.format_string() accepts the same "%.*f" specifier with a
    (places, num) tuple and a grouping flag, so behavior is unchanged.
    """
    try:
        return locale.format_string("%.*f", (places, num), grouping=bCommas)
    except:
        errorMsg()
        return False
## ===================================================================================
def CheckStatistics(outputRaster):
    # Verify that the raster has usable statistics by querying each statistic
    # property; GetRasterProperties raises when statistics are missing, which
    # is converted into a False return. (Function name restored: the dump had
    # the identifier blanked out, and the caller invokes CheckStatistics.)
    #
    # For no apparent reason, ArcGIS sometimes fails to build statistics. Might work one
    # time and then the next time it may fail without any error message.
    try:
        #PrintMsg(" \n\tChecking raster statistics", 0)

        for propType in ['MINIMUM', 'MAXIMUM', 'MEAN', 'STD']:
            statVal = arcpy.GetRasterProperties_management(outputRaster, propType).getOutput(0)
            #PrintMsg("\t\t" + propType + ": " + statVal, 1)

        return True

    except:
        return False
## ===================================================================================
def UpdateMetadata(outputWS, target, surveyInfo, iRaster):
    """Update the FGDC (non-ISO) metadata on 'target' by substituting the
    placeholder search words (xxSTATExx, xxSURVEYSxx, xxTODAYxx, xxFYxx) in
    the template XML shipped next to this script, then importing the result.

    Returns True on success, False on error.

    Fix: the except branch ended with a bare 'False' expression, so the
    error path silently returned None; it now returns False as intended.
    """
    try:
        PrintMsg("\tUpdating metadata...")
        arcpy.SetProgressor("default", "Updating metadata")

        # Set metadata translator file
        dInstall = arcpy.GetInstallInfo()
        installPath = dInstall["InstallDir"]
        prod = r"Metadata/Translator/ARCGIS2FGDC.xml"
        mdTranslator = os.path.join(installPath, prod)

        # Define input and output XML files
        mdImport = os.path.join(env.scratchFolder, "xxImport.xml")  # the metadata xml that will provide the updated info
        xmlPath = os.path.dirname(sys.argv[0])
        mdExport = os.path.join(xmlPath, "gSSURGO_MapunitRaster.xml")  # original template metadata in script directory

        # Cleanup output XML files from previous runs
        if os.path.isfile(mdImport):
            os.remove(mdImport)

        # Get replacement value for the search words
        stDict = StateNames()
        st = os.path.basename(outputWS)[8:-4]

        if st in stDict:
            # Get state name from the geodatabase
            mdState = stDict[st]
        else:
            # Leave state name blank. In the future it would be nice to include a tile name when appropriate
            mdState = ""

        # Set date strings for metadata, based upon today's date
        d = datetime.date.today()
        today = str(d.isoformat().replace("-", ""))

        # Set fiscal year according to the current month. If run during January thru September,
        # set it to the current calendar year. Otherwise set it to the next calendar year.
        if d.month > 9:
            fy = "FY" + str(d.year + 1)
        else:
            fy = "FY" + str(d.year)

        # Convert XML to tree format
        tree = ET.parse(mdExport)
        root = tree.getroot()

        # new citeInfo has title.text, edition.text, serinfo/issue.text
        citeInfo = root.findall('idinfo/citation/citeinfo/')

        if not citeInfo is None:
            # Process citation elements: title, edition, issue
            for child in citeInfo:
                if child.tag == "title":
                    if child.text.find('xxSTATExx') >= 0:
                        child.text = child.text.replace('xxSTATExx', mdState)
                    elif mdState != "":
                        child.text = child.text + " - " + mdState
                elif child.tag == "edition":
                    if child.text == 'xxFYxx':
                        child.text = fy
                elif child.tag == "serinfo":
                    for subchild in child.iter('issue'):
                        if subchild.text == "xxFYxx":
                            subchild.text = fy

        # Update place keywords
        ePlace = root.find('idinfo/keywords/place')

        if not ePlace is None:
            for child in ePlace.iter('placekey'):
                if child.text == "xxSTATExx":
                    child.text = mdState
                elif child.text == "xxSURVEYSxx":
                    child.text = surveyInfo

        # Update credits
        eIdInfo = root.find('idinfo')

        if not eIdInfo is None:
            for child in eIdInfo.iter('datacred'):
                sCreds = child.text

                if sCreds.find("xxSTATExx") >= 0:
                    child.text = child.text.replace("xxSTATExx", mdState)

                if sCreds.find("xxFYxx") >= 0:
                    child.text = child.text.replace("xxFYxx", fy)

                if sCreds.find("xxTODAYxx") >= 0:
                    child.text = child.text.replace("xxTODAYxx", today)

        idPurpose = root.find('idinfo/descript/purpose')

        if not idPurpose is None:
            ip = idPurpose.text

            if ip.find("xxFYxx") >= 0:
                idPurpose.text = ip.replace("xxFYxx", fy)

        # create new xml file which will be imported, thereby updating the table's metadata
        tree.write(mdImport, encoding="utf-8", xml_declaration=None, default_namespace=None, method="xml")

        # import updated metadata to the geodatabase table
        # Using three different methods with the same XML file works for ArcGIS 10.1
        arcpy.MetadataImporter_conversion(mdImport, target)
        arcpy.ImportMetadata_conversion(mdImport, "FROM_FGDC", target, "DISABLED")

        # delete the temporary xml metadata file
        if os.path.isfile(mdImport):
            os.remove(mdImport)

        # delete metadata tool logs
        logFolder = os.path.dirname(env.scratchFolder)
        logFile = os.path.basename(mdImport).split(".")[0] + "*"

        currentWS = env.workspace
        env.workspace = logFolder
        logList = arcpy.ListFiles(logFile)

        for lg in logList:
            arcpy.Delete_management(lg)

        env.workspace = currentWS

        return True

    except:
        errorMsg()
        return False  # bug fix: was a bare 'False' expression, returning None
## ===================================================================================
def CheckSpatialReference(muPolygon):
# Make sure that the coordinate system is projected and units are meters
try:
desc = arcpy.Describe(muPolygon)
inputSR = desc.spatialReference
if inputSR.type.upper() == "PROJECTED":
if inputSR.linearUnitName.upper() == "METER":
env.outputCoordinateSystem = inputSR
return True
else:
raise MyError, os.path.basename(theGDB) + ": Input soil polygon layer does not have a valid coordinate system for gSSURGO"
else:
raise MyError, os.path.basename(theGDB) + ": Input soil polygon layer must have a projected coordinate system"
except MyError, e:
# Example: raise MyError, "This is an error message"
PrintMsg(str(e), 2)
return False
except:
errorMsg()
return False
## ===================================================================================
def ConvertToRaster(muPolygon, rasterName):
    # Main function used for raster conversion.
    #
    # Converts the MUPOLYGON featureclass to a 10-meter geodatabase raster
    # snapped to the newest wsCDL* raster in the same geodatabase. Cell values
    # come from a scratch Lookup table (CELLVALUE = integer MUKEY) so MUKEYs
    # survive mosaic/clip; a text MUKEY attribute is added afterwards.
    # Returns True on success, False on any handled error.
    try:
        #
        # Set geoprocessing environment
        #
        env.overwriteOutput = True
        arcpy.env.compression = "LZ77"
        env.tileSize = "128 128"

        gdb = os.path.dirname(muPolygon)
        outputRaster = os.path.join(gdb, rasterName)
        iRaster = 10  # output resolution is 10 meters

        # Make sure that the env.scratchGDB is NOT Default.gdb. This causes problems for
        # some unknown reason.
        if (os.path.basename(env.scratchGDB).lower() == "default.gdb") or \
           (os.path.basename(env.scratchWorkspace).lower() == "default.gdb") or \
           (os.path.basename(env.scratchGDB).lower() == gdb):
            raise MyError, "Invalid scratch workspace setting (" + env.scratchWorkspace + ")"

        # Create an ArcInfo workspace under the scratchFolder. Trying to prevent
        # 99999 errors for PolygonToRaster on very large databases
        #
        aiWorkspace = env.scratchFolder

        if not arcpy.Exists(os.path.join(aiWorkspace, "info")):
            #PrintMsg(" \nCreating ArcInfo workspace (" + os.path.basename(aiWorkspace) + ") in: " + os.path.dirname(aiWorkspace), 1)
            arcpy.CreateArcInfoWorkspace_management(os.path.dirname(aiWorkspace), os.path.basename(aiWorkspace))

        # turn off automatic Pyramid creation and Statistics calculation
        env.rasterStatistics = "NONE"
        env.pyramid = "PYRAMIDS 0"
        env.workspace = gdb

        # Need to check for dashes or spaces in folder names or leading numbers in database or raster names
        desc = arcpy.Describe(muPolygon)

        if not arcpy.Exists(muPolygon):
            raise MyError, "Could not find input featureclass: " + muPolygon

        # Check input layer's coordinate system to make sure horizontal units are meters
        # set the output coordinate system for the raster (neccessary for PolygonToRaster)
        if CheckSpatialReference(muPolygon) == False:
            return False

        # Sometimes it helps to compact large databases before raster conversion
        #arcpy.SetProgressorLabel("Compacting database prior to rasterization...")
        #arcpy.Compact_management(gdb)

        # For rasters named using an attribute value, some attribute characters can result in
        # 'illegal' names.
        outputRaster = outputRaster.replace("-", "")

        if arcpy.Exists(outputRaster):
            arcpy.Delete_management(outputRaster)
            time.sleep(1)

        if arcpy.Exists(outputRaster):
            err = "Output raster (" + os.path.basename(outputRaster) + ") already exists"
            raise MyError, err

        #start = time.time()   # start clock to measure total processing time
        #begin = time.time()   # start clock to measure set up time
        time.sleep(2)

        PrintMsg(" \nBeginning raster conversion process", 0)

        # Create Lookup table for storing MUKEY values and their integer counterparts
        #
        lu = os.path.join(env.scratchGDB, "Lookup")

        if arcpy.Exists(lu):
            arcpy.Delete_management(lu)

        # The Lookup table contains both MUKEY and its integer counterpart (CELLVALUE).
        # Using the joined lookup table creates a raster with CellValues that are the
        # same as MUKEY (but integer). This will maintain correct MUKEY values
        # during a moscaic or clip.
        #
        arcpy.CreateTable_management(os.path.dirname(lu), os.path.basename(lu))
        arcpy.AddField_management(lu, "CELLVALUE", "LONG")
        arcpy.AddField_management(lu, "mukey", "TEXT", "#", "#", "30")

        # Create list of areasymbols present in the MUPOLYGON featureclass
        # Having problems processing CONUS list of MUKEYs. Python seems to be running out of memory,
        # but I don't see high usage in Windows Task Manager
        #
        # PrintMsg(" \nscratchFolder set to: " + env.scratchFolder, 1)

        # Create list of MUKEY values from the MUPOLYGON featureclass
        #
        # Create a list of map unit keys present in the MUPOLYGON featureclass
        #
        PrintMsg("\tGetting list of mukeys from input soil polygon layer...", 0)
        arcpy.SetProgressor("default", "Getting inventory of map units...")
        tmpPolys = "SoilPolygons"
        sqlClause = ("DISTINCT", None)

        with arcpy.da.SearchCursor(muPolygon, ["mukey"], "", "", "", sql_clause=sqlClause) as srcCursor:
            # Create a unique, sorted list of MUKEY values in the MUPOLYGON featureclass
            mukeyList = [row[0] for row in srcCursor]
            mukeyList.sort()

        if len(mukeyList) == 0:
            raise MyError, "Failed to get MUKEY values from " + muPolygon

        muCnt = len(mukeyList)

        # Load MUKEY values into Lookup table
        #
        #PrintMsg("\tSaving " + Number_Format(muCnt, 0, True) + " MUKEY values for " + Number_Format(polyCnt, 0, True) + " polygons" , 0)
        arcpy.SetProgressorLabel("Creating lookup table...")

        with arcpy.da.InsertCursor(lu, ("CELLVALUE", "mukey") ) as inCursor:
            for mukey in mukeyList:
                rec = mukey, mukey
                inCursor.insertRow(rec)

        # Add MUKEY attribute index to Lookup table
        arcpy.AddIndex_management(lu, ["mukey"], "Indx_LU")
        #
        # End of Lookup table code

        # Match NLCD raster (snapraster)
        # NOTE(review): newest wsCDL* raster (last in list order) is used as the
        # snap raster -- confirm list ordering matches "newest" for this data.
        cdlRasters = arcpy.ListRasters("wsCDL*")

        if len(cdlRasters) == 0:
            raise MyError, "Required Cropland Data Layer rasters missing from " + gdb

        else:
            cdlRaster = cdlRasters[-1]

        env.snapRaster = cdlRaster
        #env.extent = cdlRaster

        # Raster conversion process...
        #
        PrintMsg(" \nConverting featureclass " + os.path.basename(muPolygon) + " to raster (" + str(iRaster) + " meter)", 0)
        tmpPolys = "poly_tmp"
        arcpy.MakeFeatureLayer_management (muPolygon, tmpPolys)
        arcpy.AddJoin_management (tmpPolys, "mukey", lu, "mukey", "KEEP_ALL")
        arcpy.SetProgressor("default", "Running PolygonToRaster conversion...")

        # Need to make sure that the join was successful
        time.sleep(1)

        rasterFields = arcpy.ListFields(tmpPolys)
        rasterFieldNames = list()

        for rFld in rasterFields:
            rasterFieldNames.append(rFld.name.upper())

        if not "LOOKUP.CELLVALUE" in rasterFieldNames:
            raise MyError, "Join failed for Lookup table (CELLVALUE)"

        if (os.path.basename(muPolygon).upper() + ".MUKEY") in rasterFieldNames:
            #raise MyError, "Join failed for Lookup table (SPATIALVERSION)"
            priorityFld = os.path.basename(muPolygon) + ".MUKEY"

        else:
            priorityFld = os.path.basename(muPolygon) + ".CELLVALUE"

        #ListEnv()
        arcpy.PolygonToRaster_conversion(tmpPolys, "Lookup.CELLVALUE", outputRaster, "MAXIMUM_COMBINED_AREA", "", iRaster)  # No priority field for single raster

        # immediately delete temporary polygon layer to free up memory for the rest of the process
        time.sleep(1)
        arcpy.Delete_management(tmpPolys)

        # End of single raster process

        # Now finish up the single temporary raster
        #
        PrintMsg(" \nFinalizing raster conversion process:", 0)

        # Reset the stopwatch for the raster post-processing
        #begin = time.time()

        # Remove lookup table
        if arcpy.Exists(lu):
            arcpy.Delete_management(lu)

        # ****************************************************
        # Build pyramids and statistics
        # ****************************************************
        if arcpy.Exists(outputRaster):
            time.sleep(1)
            arcpy.SetProgressor("default", "Calculating raster statistics...")
            PrintMsg("\tCalculating raster statistics...", 0)
            env.pyramid = "PYRAMIDS -1 NEAREST"
            arcpy.env.rasterStatistics = 'STATISTICS 100 100'
            arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )

            if CheckStatistics(outputRaster) == False:
                # For some reason the BuildPyramidsandStatistics command failed to build statistics for this raster.
                #
                # Try using CalculateStatistics while setting an AOI
                PrintMsg("\tInitial attempt to create statistics failed, trying another method...", 0)
                time.sleep(3)

                if arcpy.Exists(os.path.join(gdb, "SAPOLYGON")):
                    # Try running CalculateStatistics with an AOI to limit the area that is processed
                    # if we have to use SAPOLYGON as an AOI, this will be REALLY slow
                    #arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE", os.path.join(outputWS, "SAPOLYGON") )
                    arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )

                if CheckStatistics(outputRaster) == False:
                    time.sleep(3)
                    PrintMsg("\tFailed in both attempts to create statistics for raster layer", 1)

            arcpy.SetProgressor("default", "Building pyramids...")
            PrintMsg("\tBuilding pyramids...", 0)
            arcpy.BuildPyramids_management(outputRaster, "-1", "NONE", "NEAREST", "DEFAULT", "", "SKIP_EXISTING")

            # ****************************************************
            # Add MUKEY to final raster
            # ****************************************************
            # Build attribute table for final output raster. Sometimes it fails to automatically build.
            PrintMsg("\tBuilding raster attribute table and updating MUKEY values", )
            arcpy.SetProgressor("default", "Building raster attrribute table...")
            arcpy.BuildRasterAttributeTable_management(outputRaster)

            # Add MUKEY values to final mapunit raster
            #
            arcpy.SetProgressor("default", "Adding MUKEY attribute to raster...")
            arcpy.AddField_management(outputRaster, "MUKEY", "TEXT", "#", "#", "30")

            # copy each cell VALUE (integer mukey) into the new text MUKEY field
            with arcpy.da.UpdateCursor(outputRaster, ["VALUE", "MUKEY"]) as cur:
                for rec in cur:
                    rec[1] = rec[0]
                    cur.updateRow(rec)

            # Add attribute index (MUKEY) for raster
            arcpy.AddIndex_management(outputRaster, ["mukey"], "Indx_RasterMukey")

        else:
            err = "Missing output raster (" + outputRaster + ")"
            raise MyError, err

        # Compare list of original mukeys with the list of raster mukeys
        # Report discrepancies. These are usually thin polygons along survey boundaries,
        # added to facilitate a line-join.
        #
        arcpy.SetProgressor("default", "Looking for missing map units...")
        rCnt = int(arcpy.GetRasterProperties_management (outputRaster, "UNIQUEVALUECOUNT").getOutput(0))

        if rCnt <> muCnt:
            missingList = list()
            rList = list()

            # Create list of raster mukeys...
            with arcpy.da.SearchCursor(outputRaster, ("MUKEY",)) as rcur:
                for rec in rcur:
                    mukey = rec[0]
                    rList.append(mukey)

            missingList = list(set(mukeyList) - set(rList))
            queryList = list()

            for mukey in missingList:
                queryList.append("'" + mukey + "'")

            if len(queryList) > 0:
                PrintMsg("\tDiscrepancy in mapunit count for new raster", 1)
                #PrintMsg("\t\tInput polygon mapunits: " + Number_Format(muCnt, 0, True), 0)
                #PrintMsg("\t\tOutput raster mapunits: " + Number_Format(rCnt, 0, True), 0)
                PrintMsg("The following MUKEY values were present in the original MUPOLYGON featureclass, ", 1)
                PrintMsg("but not in the raster", 1)
                PrintMsg("\t\tMUKEY IN (" + ", ".join(queryList) + ") \n ", 0)

        # Update metadata file for the geodatabase
        #
        # Query the output SACATALOG table to get list of surveys that were exported to the gSSURGO
        #
        #saTbl = os.path.join(theGDB, "sacatalog")
        #expList = list()

        #with arcpy.da.SearchCursor(saTbl, ("AREASYMBOL", "SAVEREST")) as srcCursor:
        #    for rec in srcCursor:
        #        expList.append(rec[0] + " (" + str(rec[1]).split()[0] + ")")

        #surveyInfo = ", ".join(expList)
        surveyInfo = ""  # could get this from SDA
        #time.sleep(2)
        arcpy.SetProgressorLabel("Updating metadata NOT...")
        #bMetaData = UpdateMetadata(outputWS, outputRaster, surveyInfo, iRaster)

        del outputRaster
        del muPolygon

        arcpy.CheckInExtension("Spatial")

        return True

    except MyError, e:
        # Example: raise MyError, "This is an error message"
        PrintMsg(str(e), 2)
        arcpy.CheckInExtension("Spatial")
        return False

    except MemoryError:
        raise MyError, "Not enough memory to process. Try running again with the 'Use tiles' option"

    except:
        errorMsg()
        arcpy.CheckInExtension("Spatial")
        return False
## ===================================================================================
## ===================================================================================
## MAIN
## ===================================================================================

# Import system modules
import sys, string, os, arcpy, locale, traceback, math, time, datetime, shutil
import xml.etree.cElementTree as ET
from arcpy import env

# Create the Geoprocessor object
try:
    if __name__ == "__main__":
        # get parameters
        muPolygon = arcpy.GetParameterAsText(0)    # required gSSURGO polygon layer
        rasterName = arcpy.GetParameterAsText(1)   # required name for output gdb raster

        env.overwriteOutput = True
        iRaster = 10   # output raster cell size (meters)

        # Get Spatial Analyst extension
        if arcpy.CheckExtension("Spatial") == "Available":
            # try to find the name of the tile from the geodatabase name
            # set the name of the output raster using the tilename and cell resolution
            from arcpy.sa import *
            arcpy.CheckOutExtension("Spatial")

        else:
            # 'raise MyError(msg)' is valid on Python 2.6+ (ArcGIS 10.x) and
            # forward-compatible, unlike the old 'raise MyError, msg' form.
            raise MyError("Required Spatial Analyst extension is not available")

        # Call function that does all of the work.
        # BUG FIX: the original called ConvertToRaster(muPolygon, theSnapRaster, iRaster),
        # but theSnapRaster is never defined anywhere in this script and
        # ConvertToRaster() takes exactly (muPolygon, rasterName).
        bRaster = ConvertToRaster(muPolygon, rasterName)

        arcpy.CheckInExtension("Spatial")

except MyError as e:
    # Controlled failure: the raiser already formatted a user-readable message
    PrintMsg(str(e), 2)

except:
    errorMsg()
| CheckStatistics | identifier_name |
ACPF_ExportMuRaster.py | # SSURGO_ExportMuRaster.py
#
# Convert MUPOLYGON featureclass to raster for the specified SSURGO geodatabase.
# By default any small NoData areas (< 5000 sq meters) will be filled using
# the Majority value.
#
# Input mupolygon featureclass must have a projected coordinate system or it will skip.
# Input databases and featureclasses must use naming convention established by the
# 'SDM Export By State' tool.
#
# For geographic regions that have USGS NLCD available, the tool will automatically
# align the coordinate system and raster grid to match.
#
# 10-31-2013 Added gap fill method
#
# 11-05-2014
# 11-22-2013
# 12-10-2013 Problem with using non-unique cellvalues for raster. Going back to
# creating an integer version of MUKEY in the mapunit polygon layer.
# 12-13-2013 Occasionally see error messages related to temporary GRIDs (g_g*) created
# under "C:\Users\steve.peaslee\AppData\Local\Temp\a subfolder". These
# are probably caused by orphaned INFO tables.
# 01-08-2014 Added basic raster metadata (still need process steps)
# 01-12-2014 Restricted conversion to use only input MUPOLYGON featureclass having
# a projected coordinate system with linear units=Meter
# 01-31-2014 Added progressor bar to 'Saving MUKEY values..'. Seems to be a hangup at this
# point when processing CONUS geodatabase
# 02-14-2014 Changed FeatureToLayer (CELL_CENTER) to PolygonToRaster (MAXIMUM_COMBINED_AREA)
# and removed the Gap Fill option.
# 2014-09-27 Added ISO metadata import
#
# 2014-10-18 Noticed that failure to create raster seemed to be related to long
# file names or non-alphanumeric characters such as a dash in the name.
#
# 2014-10-29 Removed ORDER BY MUKEY sql clause because some computers were failing on that line.
# Don't understand why.
#
# 2014-10-31 Added error message if the MUKEY column is not populated in the MUPOLYGON featureclass
#
# 2014-11-04 Problems occur when the user's gp environment points to Default.gdb for the scratchWorkpace.
# Added a fatal error message when that occurs.
#
# 2015-01-15 Hopefully fixed some of the issues that caused the raster conversion to crash at the end.
# Cleaned up some of the current workspace settings and moved the renaming of the final raster.
#
# 2015-02-26 Adding option for tiling raster conversion by areasymbol and then mosaicing. Slower and takes
# more disk space, but gets the job done when otherwise PolygonToRaster fails on big datasets.
# 2015-02-27 Make bTiling variable an integer (0, 2, 5) that can be used to slice the areasymbol value. This will
# give the user an option to tile by state (2) or by survey area (5)
# 2015-03-10 Moved sequence of CheckInExtension. It was at the beginning which seems wrong.
#
# 2015-03-11 Switched tiled raster format from geodatabase raster to TIFF. This should allow the entire
# temporary folder to be deleted instead of deleting rasters one-at-a-time (slow).
# 2015-03-11 Added attribute index (mukey) to raster attribute table
# 2015-03-13 Modified output raster name by incorporating the geodatabase name (after '_' and before ".gdb")
#
# 2015-09-16 Temporarily renamed output raster using a shorter string
#
# 2015-09-16 Trying several things to address 9999 failure on CONUS. Created a couple of ArcInfo workspace in temp
# 2015-09-16 Compacting geodatabase before PolygonToRaster conversion
#
# 2015-09-18 Still having problems with CONUS raster even with ArcGIS 10.3. Even the tiled method failed once
# on AR105. Actually may have been the next survey, but random order so don't know which one for sure.
# Trying to reorder mosaic to match the spatial order of the polygon layers. Need to figure out if
# the 99999 error in PolygonToRaster is occurring with the same soil survey or same count or any
# other pattern.
#
# 2015-09-18 Need to remember to turn off all layers in ArcMap. Redraw is triggered after each tile.
#
# 2015-10-01 Found problem apparently caused by 10.3. SnapRaster functionality was failing with tiles because of
# MakeFeatureLayer where_clause. Perhaps due to cursor lock persistence? Rewrote entire function to
# use SAPOLYGON featureclass to define extents for tiles. This seems to be working better anyway.
#
# 2015-10-02 Need to look at some method for sorting the extents of each tile and sort them in a geographic fashion.
# A similar method was used in the Create gSSURGO database tools for the Append process.
#
# 2015-10-23 Jennifer and I finally figured out what was causing her PolygonToRaster 9999 errors.
# It was dashes in the output GDB path. Will add a check for bad characters in path.
#
# 2015-10-26 Changed up SnapToNLCD function to incorporate SnapRaster input as long as the coordinate
# system matches and the extent coordinates are integer (no floating point!).
#
# 2015-10-27 Looking at possible issue with batchmode processing of rasters. Jennifer had several
# errors when trying to run all states at once.
#
# 2015-11-03 Fixed failure when indexing non-geodatabase rasters such as .IMG.
## ===================================================================================
class MyError(Exception):
    """Custom exception used to abort processing with a user-readable message."""
    pass
## ===================================================================================
def PrintMsg(msg, severity=0):
    """Send a message to the geoprocessor message queue, one GPMessage per line.

    severity: 0 = informational, 1 = warning, 2 = error (preceded by a blank line).
    Any failure (e.g. no geoprocessor available) is silently ignored.
    """
    try:
        # Emit a separate geoprocessing message for each line of the text
        for line in msg.split('\n'):
            if severity == 1:
                arcpy.AddWarning(line)
            elif severity == 2:
                arcpy.AddMessage(" ")
                arcpy.AddError(line)
            elif severity == 0:
                arcpy.AddMessage(line)

    except:
        pass
## ===================================================================================
def errorMsg():
    """Report the active exception (first traceback frame, type and value) via PrintMsg.

    Intended to be called from inside an 'except' block; never raises itself.
    """
    try:
        # FIX: sys.exc_type / sys.exc_value are deprecated Python 2 globals
        # (removed in Python 3, and not thread-safe). sys.exc_info() was already
        # being called for the traceback, so take all three values from it.
        excType, excValue, tb = sys.exc_info()
        tbinfo = traceback.format_tb(tb)[0]
        theMsg = tbinfo + "\n" + str(excType) + ": " + str(excValue)
        PrintMsg(theMsg, 2)

    except:
        PrintMsg("Unhandled error in errorMsg method", 2)
        pass
## ===================================================================================
def WriteToLog(theMsg, theRptFile):
    """Append a message to the report/log file, one message per line.

    theMsg     -- message text to record (a newline is prepended)
    theRptFile -- full path of the report file; created if it does not exist
    """
    try:
        # FIX: use a context manager so the file handle is closed even when
        # write() raises (the original leaked the handle on error).
        with open(theRptFile, "a") as fh:
            fh.write("\n" + theMsg)

    except:
        errorMsg()
        pass
## ===================================================================================
def elapsedTime(start):
    """Return a human-readable string of the time elapsed since 'start'.

    start -- a time.time() timestamp.
    Returns e.g. "2 minutes 5.0 seconds " (note trailing space); "" on error.
    Days/hours/minutes are only included once the elapsed time reaches them.
    """
    try:
        # Stop timer
        end = time.time()
        eTotal = end - start

        # day = 86400 seconds, hour = 3600 seconds, minute = 60 seconds
        eMsg = ""

        # Elapsed whole days; math.modf returns (fraction, whole)
        eDayR, eDayWhole = math.modf(eTotal / 86400)
        eDay = int(eDayWhole)
        if eDay > 0:
            eMsg = eMsg + str(eDay) + (" day " if eDay == 1 else " days ")

        # Elapsed whole hours from the leftover day fraction
        eHourR, eHourWhole = math.modf(eDayR * 24)
        eHour = int(eHourWhole)
        if eDay > 0 or eHour > 0:
            # FIX: original printed "0 hour"/"0 minute" (singular) for zero values
            eMsg = eMsg + str(eHour) + (" hour " if eHour == 1 else " hours ")

        # Elapsed whole minutes from the leftover hour fraction
        eMinuteR, eMinuteWhole = math.modf(eHourR * 60)
        eMinute = int(eMinuteWhole)
        if eDay > 0 or eHour > 0 or eMinute > 0:
            eMsg = eMsg + str(eMinute) + (" minute " if eMinute == 1 else " minutes ")

        # Elapsed seconds, one decimal place.
        # BUG FIX: the original compared against "1.00", but "%.1f" can never
        # produce "1.00", so the singular " second " branch was unreachable.
        eSeconds = "%.1f" % (eMinuteR * 60)
        eMsg = eMsg + eSeconds + (" second " if eSeconds == "1.0" else " seconds ")

        return eMsg

    except:
        errorMsg()
        return ""
## ===================================================================================
def Number_Format(num, places=0, bCommas=True):
    """Format a number per the current locale with the given decimal places.

    num     -- numeric value to format
    places  -- number of decimal places
    bCommas -- when True, apply the locale's digit grouping (e.g. thousands commas)
    Returns the formatted string, or False on error.
    """
    try:
        # FIX: locale.format() is deprecated (removed in Python 3.12) and is
        # documented to accept only a single '%' specifier, not a tuple for a
        # '*' width. locale.format_string() exists in both Python 2 and 3 and
        # handles the (places, num) tuple correctly. The duplicated if/else is
        # collapsed by passing bCommas straight through as the grouping flag.
        return locale.format_string("%.*f", (places, num), grouping=bCommas)

    except:
        errorMsg()
        return False
## ===================================================================================
def CheckStatistics(outputRaster):
    """Return True when raster statistics exist, False otherwise.

    ArcGIS sometimes silently fails to build statistics; querying each
    statistic property raises when statistics are missing, which this
    function converts into a boolean result.
    """
    try:
        #PrintMsg(" \n\tChecking raster statistics", 0)
        for statType in ('MINIMUM', 'MAXIMUM', 'MEAN', 'STD'):
            # Raises if the property has not been calculated for this raster
            arcpy.GetRasterProperties_management(outputRaster, statType).getOutput(0)

        return True

    except:
        return False
## ===================================================================================
def UpdateMetadata(outputWS, target, surveyInfo, iRaster):
#
# Used for non-ISO metadata
#
# Search words: xxSTATExx, xxSURVEYSxx, xxTODAYxx, xxFYxx
#
|
## ===================================================================================
def CheckSpatialReference(muPolygon):
    """Verify the input featureclass uses a projected coordinate system in meters.

    On success, sets env.outputCoordinateSystem to the input's spatial reference
    (required later by PolygonToRaster) and returns True; otherwise reports an
    error and returns False.
    """
    try:
        desc = arcpy.Describe(muPolygon)
        inputSR = desc.spatialReference

        if inputSR.type.upper() == "PROJECTED":
            if inputSR.linearUnitName.upper() == "METER":
                env.outputCoordinateSystem = inputSR
                return True

            else:
                # BUG FIX: original referenced 'theGDB', which is not defined in
                # this scope and would raise NameError instead of the intended
                # message; use the input featureclass path instead.
                raise MyError(os.path.basename(muPolygon) + ": Input soil polygon layer does not have a valid coordinate system for gSSURGO")

        else:
            raise MyError(os.path.basename(muPolygon) + ": Input soil polygon layer must have a projected coordinate system")

    except MyError as e:
        # Example: raise MyError("This is an error message")
        PrintMsg(str(e), 2)
        return False

    except:
        errorMsg()
        return False
## ===================================================================================
def ConvertToRaster(muPolygon, rasterName):
    """Convert the MUPOLYGON featureclass to a 10 meter geodatabase raster.

    muPolygon  -- full path of the input soil polygon featureclass (inside a file gdb)
    rasterName -- name for the output raster, created in the same geodatabase

    Builds a scratch Lookup table mapping MUKEY -> integer CELLVALUE, joins it to
    the polygons, runs PolygonToRaster (MAXIMUM_COMBINED_AREA), then builds
    statistics, pyramids, the raster attribute table and a text MUKEY column.
    Returns True on success, False on any failure (reported via PrintMsg).
    NOTE(review): this body uses Python 2-only syntax ('raise X, msg',
    'except X, e', '<>') and will not parse under Python 3.
    """
    # main function used for raster conversion
    try:
        #
        # Set geoprocessing environment
        #
        env.overwriteOutput = True
        arcpy.env.compression = "LZ77"
        env.tileSize = "128 128"
        gdb = os.path.dirname(muPolygon)
        outputRaster = os.path.join(gdb, rasterName)
        iRaster = 10  # output resolution is 10 meters

        # Make sure that the env.scratchGDB is NOT Default.gdb. This causes problems for
        # some unknown reason.
        if (os.path.basename(env.scratchGDB).lower() == "default.gdb") or \
           (os.path.basename(env.scratchWorkspace).lower() == "default.gdb") or \
           (os.path.basename(env.scratchGDB).lower() == gdb):
            raise MyError, "Invalid scratch workspace setting (" + env.scratchWorkspace + ")"

        # Create an ArcInfo workspace under the scratchFolder. Trying to prevent
        # 99999 errors for PolygonToRaster on very large databases
        #
        aiWorkspace = env.scratchFolder

        if not arcpy.Exists(os.path.join(aiWorkspace, "info")):
            #PrintMsg(" \nCreating ArcInfo workspace (" + os.path.basename(aiWorkspace) + ") in: " + os.path.dirname(aiWorkspace), 1)
            arcpy.CreateArcInfoWorkspace_management(os.path.dirname(aiWorkspace), os.path.basename(aiWorkspace))

        # turn off automatic Pyramid creation and Statistics calculation
        # (they are built explicitly at the end, after the raster exists)
        env.rasterStatistics = "NONE"
        env.pyramid = "PYRAMIDS 0"
        env.workspace = gdb

        # Need to check for dashes or spaces in folder names or leading numbers in database or raster names
        # NOTE(review): 'desc' is assigned but never used in this function.
        desc = arcpy.Describe(muPolygon)

        if not arcpy.Exists(muPolygon):
            raise MyError, "Could not find input featureclass: " + muPolygon

        # Check input layer's coordinate system to make sure horizontal units are meters
        # set the output coordinate system for the raster (neccessary for PolygonToRaster)
        if CheckSpatialReference(muPolygon) == False:
            return False

        # Sometimes it helps to compact large databases before raster conversion
        #arcpy.SetProgressorLabel("Compacting database prior to rasterization...")
        #arcpy.Compact_management(gdb)

        # For rasters named using an attribute value, some attribute characters can result in
        # 'illegal' names.
        outputRaster = outputRaster.replace("-", "")

        if arcpy.Exists(outputRaster):
            arcpy.Delete_management(outputRaster)
            time.sleep(1)

        if arcpy.Exists(outputRaster):
            err = "Output raster (" + os.path.basename(outputRaster) + ") already exists"
            raise MyError, err

        #start = time.time()   # start clock to measure total processing time
        #begin = time.time()   # start clock to measure set up time
        time.sleep(2)

        PrintMsg(" \nBeginning raster conversion process", 0)

        # Create Lookup table for storing MUKEY values and their integer counterparts
        #
        lu = os.path.join(env.scratchGDB, "Lookup")

        if arcpy.Exists(lu):
            arcpy.Delete_management(lu)

        # The Lookup table contains both MUKEY and its integer counterpart (CELLVALUE).
        # Using the joined lookup table creates a raster with CellValues that are the
        # same as MUKEY (but integer). This will maintain correct MUKEY values
        # during a moscaic or clip.
        #
        arcpy.CreateTable_management(os.path.dirname(lu), os.path.basename(lu))
        arcpy.AddField_management(lu, "CELLVALUE", "LONG")
        arcpy.AddField_management(lu, "mukey", "TEXT", "#", "#", "30")

        # Create list of areasymbols present in the MUPOLYGON featureclass
        # Having problems processing CONUS list of MUKEYs. Python seems to be running out of memory,
        # but I don't see high usage in Windows Task Manager
        #
        # PrintMsg(" \nscratchFolder set to: " + env.scratchFolder, 1)

        # Create list of MUKEY values from the MUPOLYGON featureclass
        #
        # Create a list of map unit keys present in the MUPOLYGON featureclass
        #
        PrintMsg("\tGetting list of mukeys from input soil polygon layer...", 0)
        arcpy.SetProgressor("default", "Getting inventory of map units...")
        # NOTE(review): tmpPolys is reassigned to "poly_tmp" before its first use below.
        tmpPolys = "SoilPolygons"
        sqlClause = ("DISTINCT", None)

        with arcpy.da.SearchCursor(muPolygon, ["mukey"], "", "", "", sql_clause=sqlClause) as srcCursor:
            # Create a unique, sorted list of MUKEY values in the MUPOLYGON featureclass
            mukeyList = [row[0] for row in srcCursor]

        mukeyList.sort()

        if len(mukeyList) == 0:
            raise MyError, "Failed to get MUKEY values from " + muPolygon

        muCnt = len(mukeyList)

        # Load MUKEY values into Lookup table
        # NOTE(review): CELLVALUE receives the text MUKEY and relies on
        # implicit text-to-LONG conversion by the insert cursor.
        #
        #PrintMsg("\tSaving " + Number_Format(muCnt, 0, True) + " MUKEY values for " + Number_Format(polyCnt, 0, True) + " polygons" , 0)
        arcpy.SetProgressorLabel("Creating lookup table...")

        with arcpy.da.InsertCursor(lu, ("CELLVALUE", "mukey") ) as inCursor:
            for mukey in mukeyList:
                rec = mukey, mukey
                inCursor.insertRow(rec)

        # Add MUKEY attribute index to Lookup table
        arcpy.AddIndex_management(lu, ["mukey"], "Indx_LU")
        #
        # End of Lookup table code

        # Match NLCD raster (snapraster)
        cdlRasters = arcpy.ListRasters("wsCDL*")

        if len(cdlRasters) == 0:
            raise MyError, "Required Cropland Data Layer rasters missing from " + gdb

        else:
            cdlRaster = cdlRasters[-1]

        env.snapRaster = cdlRaster
        #env.extent = cdlRaster

        # Raster conversion process...
        #
        PrintMsg(" \nConverting featureclass " + os.path.basename(muPolygon) + " to raster (" + str(iRaster) + " meter)", 0)
        tmpPolys = "poly_tmp"
        arcpy.MakeFeatureLayer_management (muPolygon, tmpPolys)
        arcpy.AddJoin_management (tmpPolys, "mukey", lu, "mukey", "KEEP_ALL")
        arcpy.SetProgressor("default", "Running PolygonToRaster conversion...")

        # Need to make sure that the join was successful
        time.sleep(1)

        rasterFields = arcpy.ListFields(tmpPolys)
        rasterFieldNames = list()

        for rFld in rasterFields:
            rasterFieldNames.append(rFld.name.upper())

        if not "LOOKUP.CELLVALUE" in rasterFieldNames:
            raise MyError, "Join failed for Lookup table (CELLVALUE)"

        # NOTE(review): priorityFld is computed here but never used afterwards;
        # PolygonToRaster below is called without a priority field.
        if (os.path.basename(muPolygon).upper() + ".MUKEY") in rasterFieldNames:
            #raise MyError, "Join failed for Lookup table (SPATIALVERSION)"
            priorityFld = os.path.basename(muPolygon) + ".MUKEY"

        else:
            priorityFld = os.path.basename(muPolygon) + ".CELLVALUE"

        #ListEnv()
        arcpy.PolygonToRaster_conversion(tmpPolys, "Lookup.CELLVALUE", outputRaster, "MAXIMUM_COMBINED_AREA", "", iRaster) # No priority field for single raster

        # immediately delete temporary polygon layer to free up memory for the rest of the process
        time.sleep(1)
        arcpy.Delete_management(tmpPolys)

        # End of single raster process

        # Now finish up the single temporary raster
        #
        PrintMsg(" \nFinalizing raster conversion process:", 0)
        # Reset the stopwatch for the raster post-processing
        #begin = time.time()

        # Remove lookup table
        if arcpy.Exists(lu):
            arcpy.Delete_management(lu)

        # ****************************************************
        # Build pyramids and statistics
        # ****************************************************
        if arcpy.Exists(outputRaster):
            time.sleep(1)
            arcpy.SetProgressor("default", "Calculating raster statistics...")
            PrintMsg("\tCalculating raster statistics...", 0)
            env.pyramid = "PYRAMIDS -1 NEAREST"
            arcpy.env.rasterStatistics = 'STATISTICS 100 100'
            arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )

            if CheckStatistics(outputRaster) == False:
                # For some reason the BuildPyramidsandStatistics command failed to build statistics for this raster.
                #
                # Try using CalculateStatistics while setting an AOI
                PrintMsg("\tInitial attempt to create statistics failed, trying another method...", 0)
                time.sleep(3)

                if arcpy.Exists(os.path.join(gdb, "SAPOLYGON")):
                    # Try running CalculateStatistics with an AOI to limit the area that is processed
                    # if we have to use SAPOLYGON as an AOI, this will be REALLY slow
                    #arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE", os.path.join(outputWS, "SAPOLYGON") )
                    arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )

                    if CheckStatistics(outputRaster) == False:
                        time.sleep(3)
                        PrintMsg("\tFailed in both attempts to create statistics for raster layer", 1)

            arcpy.SetProgressor("default", "Building pyramids...")
            PrintMsg("\tBuilding pyramids...", 0)
            arcpy.BuildPyramids_management(outputRaster, "-1", "NONE", "NEAREST", "DEFAULT", "", "SKIP_EXISTING")

            # ****************************************************
            # Add MUKEY to final raster
            # ****************************************************
            # Build attribute table for final output raster. Sometimes it fails to automatically build.
            PrintMsg("\tBuilding raster attribute table and updating MUKEY values", )
            # NOTE(review): "attrribute" typo below is in a runtime progress label; left as-is.
            arcpy.SetProgressor("default", "Building raster attrribute table...")
            arcpy.BuildRasterAttributeTable_management(outputRaster)

            # Add MUKEY values to final mapunit raster
            # (copies each cell VALUE into the text MUKEY field so downstream
            # joins can use the original string key)
            #
            arcpy.SetProgressor("default", "Adding MUKEY attribute to raster...")
            arcpy.AddField_management(outputRaster, "MUKEY", "TEXT", "#", "#", "30")

            with arcpy.da.UpdateCursor(outputRaster, ["VALUE", "MUKEY"]) as cur:
                for rec in cur:
                    rec[1] = rec[0]
                    cur.updateRow(rec)

            # Add attribute index (MUKEY) for raster
            arcpy.AddIndex_management(outputRaster, ["mukey"], "Indx_RasterMukey")

        else:
            err = "Missing output raster (" + outputRaster + ")"
            raise MyError, err

        # Compare list of original mukeys with the list of raster mukeys
        # Report discrepancies. These are usually thin polygons along survey boundaries,
        # added to facilitate a line-join.
        #
        arcpy.SetProgressor("default", "Looking for missing map units...")
        rCnt = int(arcpy.GetRasterProperties_management (outputRaster, "UNIQUEVALUECOUNT").getOutput(0))

        if rCnt <> muCnt:
            missingList = list()
            rList = list()

            # Create list of raster mukeys...
            with arcpy.da.SearchCursor(outputRaster, ("MUKEY",)) as rcur:
                for rec in rcur:
                    mukey = rec[0]
                    rList.append(mukey)

            missingList = list(set(mukeyList) - set(rList))
            queryList = list()

            for mukey in missingList:
                queryList.append("'" + mukey + "'")

            if len(queryList) > 0:
                PrintMsg("\tDiscrepancy in mapunit count for new raster", 1)
                #PrintMsg("\t\tInput polygon mapunits: " + Number_Format(muCnt, 0, True), 0)
                #PrintMsg("\t\tOutput raster mapunits: " + Number_Format(rCnt, 0, True), 0)
                PrintMsg("The following MUKEY values were present in the original MUPOLYGON featureclass, ", 1)
                PrintMsg("but not in the raster", 1)
                PrintMsg("\t\tMUKEY IN (" + ", ".join(queryList) + ") \n ", 0)

        # Update metadata file for the geodatabase
        #
        # Query the output SACATALOG table to get list of surveys that were exported to the gSSURGO
        #
        #saTbl = os.path.join(theGDB, "sacatalog")
        #expList = list()

        #with arcpy.da.SearchCursor(saTbl, ("AREASYMBOL", "SAVEREST")) as srcCursor:
        #    for rec in srcCursor:
        #        expList.append(rec[0] + " (" + str(rec[1]).split()[0] + ")")

        #surveyInfo = ", ".join(expList)
        surveyInfo = ""  # could get this from SDA
        #time.sleep(2)
        arcpy.SetProgressorLabel("Updating metadata NOT...")
        #bMetaData = UpdateMetadata(outputWS, outputRaster, surveyInfo, iRaster)

        del outputRaster
        del muPolygon

        arcpy.CheckInExtension("Spatial")

        return True

    except MyError, e:
        # Example: raise MyError, "This is an error message"
        PrintMsg(str(e), 2)
        arcpy.CheckInExtension("Spatial")
        return False

    except MemoryError:
        raise MyError, "Not enough memory to process. Try running again with the 'Use tiles' option"

    except:
        errorMsg()
        arcpy.CheckInExtension("Spatial")
        return False
## ===================================================================================
## ===================================================================================
## MAIN
## ===================================================================================

# Import system modules
import sys, string, os, arcpy, locale, traceback, math, time, datetime, shutil
import xml.etree.cElementTree as ET
from arcpy import env

# Create the Geoprocessor object
try:
    if __name__ == "__main__":
        # get parameters
        muPolygon = arcpy.GetParameterAsText(0)    # required gSSURGO polygon layer
        rasterName = arcpy.GetParameterAsText(1)   # required name for output gdb raster

        env.overwriteOutput = True
        iRaster = 10   # output raster cell size (meters)

        # Get Spatial Analyst extension
        if arcpy.CheckExtension("Spatial") == "Available":
            # try to find the name of the tile from the geodatabase name
            # set the name of the output raster using the tilename and cell resolution
            from arcpy.sa import *
            arcpy.CheckOutExtension("Spatial")

        else:
            # 'raise MyError(msg)' is valid on Python 2.6+ (ArcGIS 10.x) and
            # forward-compatible, unlike the old 'raise MyError, msg' form.
            raise MyError("Required Spatial Analyst extension is not available")

        # Call function that does all of the work.
        # BUG FIX: the original called ConvertToRaster(muPolygon, theSnapRaster, iRaster),
        # but theSnapRaster is never defined anywhere in this script and
        # ConvertToRaster() takes exactly (muPolygon, rasterName).
        bRaster = ConvertToRaster(muPolygon, rasterName)

        arcpy.CheckInExtension("Spatial")

except MyError as e:
    # Controlled failure: the raiser already formatted a user-readable message
    PrintMsg(str(e), 2)

except:
    errorMsg()
    # NOTE(review): this is the body of UpdateMetadata(outputWS, target, surveyInfo, iRaster)
    # (non-ISO FGDC metadata). It replaces the placeholder search words
    # xxSTATExx, xxSURVEYSxx, xxTODAYxx, xxFYxx in a template XML and imports
    # the result onto 'target'. StateNames() is assumed to be defined elsewhere
    # in the full script -- TODO confirm; it is not visible in this chunk.
    try:
        PrintMsg("\tUpdating metadata...")
        arcpy.SetProgressor("default", "Updating metadata")

        # Set metadata translator file
        dInstall = arcpy.GetInstallInfo()
        installPath = dInstall["InstallDir"]
        prod = r"Metadata/Translator/ARCGIS2FGDC.xml"
        mdTranslator = os.path.join(installPath, prod)

        # Define input and output XML files
        mdImport = os.path.join(env.scratchFolder, "xxImport.xml")  # the metadata xml that will provide the updated info
        xmlPath = os.path.dirname(sys.argv[0])
        mdExport = os.path.join(xmlPath, "gSSURGO_MapunitRaster.xml")  # original template metadata in script directory

        # Cleanup output XML files from previous runs
        if os.path.isfile(mdImport):
            os.remove(mdImport)

        # Get replacement value for the search words
        # (presumably outputWS is named like "gSSURGO_XX.gdb" so [8:-4] extracts
        # the state abbreviation -- TODO confirm against callers)
        #
        stDict = StateNames()
        st = os.path.basename(outputWS)[8:-4]

        if st in stDict:
            # Get state name from the geodatabase
            mdState = stDict[st]

        else:
            # Leave state name blank. In the future it would be nice to include a tile name when appropriate
            mdState = ""

        # Set date strings for metadata, based upon today's date
        #
        d = datetime.date.today()
        today = str(d.isoformat().replace("-",""))

        # Set fiscal year according to the current month. If run during January thru September,
        # set it to the current calendar year. Otherwise set it to the next calendar year.
        #
        if d.month > 9:
            fy = "FY" + str(d.year + 1)

        else:
            fy = "FY" + str(d.year)

        # Convert XML to tree format
        tree = ET.parse(mdExport)
        root = tree.getroot()

        # new citeInfo has title.text, edition.text, serinfo/issue.text
        citeInfo = root.findall('idinfo/citation/citeinfo/')

        if not citeInfo is None:
            # Process citation elements
            # title, edition, issue
            #
            for child in citeInfo:
                #PrintMsg("\t\t" + str(child.tag), 0)
                if child.tag == "title":
                    if child.text.find('xxSTATExx') >= 0:
                        child.text = child.text.replace('xxSTATExx', mdState)

                    elif mdState != "":
                        child.text = child.text + " - " + mdState

                elif child.tag == "edition":
                    if child.text == 'xxFYxx':
                        child.text = fy

                elif child.tag == "serinfo":
                    for subchild in child.iter('issue'):
                        if subchild.text == "xxFYxx":
                            subchild.text = fy

        # Update place keywords
        ePlace = root.find('idinfo/keywords/place')

        if not ePlace is None:
            #PrintMsg("\t\tplace keywords", 0)
            for child in ePlace.iter('placekey'):
                if child.text == "xxSTATExx":
                    child.text = mdState

                elif child.text == "xxSURVEYSxx":
                    child.text = surveyInfo

        # Update credits
        eIdInfo = root.find('idinfo')

        if not eIdInfo is None:
            #PrintMsg("\t\tcredits", 0)
            for child in eIdInfo.iter('datacred'):
                sCreds = child.text

                if sCreds.find("xxSTATExx") >= 0:
                    #PrintMsg("\t\tcredits " + mdState, 0)
                    child.text = child.text.replace("xxSTATExx", mdState)

                if sCreds.find("xxFYxx") >= 0:
                    #PrintMsg("\t\tcredits " + fy, 0)
                    child.text = child.text.replace("xxFYxx", fy)

                if sCreds.find("xxTODAYxx") >= 0:
                    #PrintMsg("\t\tcredits " + today, 0)
                    child.text = child.text.replace("xxTODAYxx", today)

        idPurpose = root.find('idinfo/descript/purpose')

        if not idPurpose is None:
            ip = idPurpose.text

            if ip.find("xxFYxx") >= 0:
                idPurpose.text = ip.replace("xxFYxx", fy)
                #PrintMsg("\t\tpurpose", 0)

        # create new xml file which will be imported, thereby updating the table's metadata
        tree.write(mdImport, encoding="utf-8", xml_declaration=None, default_namespace=None, method="xml")

        # import updated metadata to the geodatabase table
        # Using three different methods with the same XML file works for ArcGIS 10.1
        #
        #PrintMsg("\t\tApplying metadata translators...")
        arcpy.MetadataImporter_conversion (mdImport, target)
        arcpy.ImportMetadata_conversion(mdImport, "FROM_FGDC", target, "DISABLED")

        # delete the temporary xml metadata file
        if os.path.isfile(mdImport):
            os.remove(mdImport)
            pass

        # delete metadata tool logs
        logFolder = os.path.dirname(env.scratchFolder)
        logFile = os.path.basename(mdImport).split(".")[0] + "*"

        currentWS = env.workspace
        env.workspace = logFolder
        logList = arcpy.ListFiles(logFile)

        for lg in logList:
            arcpy.Delete_management(lg)

        env.workspace = currentWS

        return True

    except:
        errorMsg()
        # NOTE(review): bare 'False' below is a no-op expression -- the function
        # falls through and returns None on error; almost certainly intended to
        # be 'return False'. Left unchanged here (documentation-only edit).
        False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.