repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
citrix/terraform-provider-netscaler | vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go | 22560 | package initwd
import (
"fmt"
"log"
"os"
"path/filepath"
"strings"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform-config-inspect/tfconfig"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/internal/earlyconfig"
"github.com/hashicorp/terraform/internal/modsdir"
"github.com/hashicorp/terraform/registry"
"github.com/hashicorp/terraform/registry/regsrc"
"github.com/hashicorp/terraform/registry/response"
"github.com/hashicorp/terraform/tfdiags"
)
// ModuleInstaller installs a root module's direct and transitive child
// modules into a local modules directory, consulting a module registry
// client for registry-sourced modules and caching registry responses so
// that repeated installs of the same module avoid extra network calls.
type ModuleInstaller struct {
	// modsDir is the directory that installed modules are written into.
	modsDir string
	// reg is the client used to communicate with module registries.
	reg *registry.Client

	// The keys in moduleVersions are resolved and trimmed registry source
	// addresses and the values are the registry response.
	moduleVersions map[string]*response.ModuleVersions

	// The keys in moduleVersionsUrl are (module address, version) pairs,
	// represented by the moduleVersion struct below, and the values are
	// the corresponding download URLs returned by the registry.
	moduleVersionsUrl map[moduleVersion]string
}

// moduleVersion pairs a registry module address with a specific version
// string, for use as a key in the download-URL cache above.
type moduleVersion struct {
	module  string
	version string
}
// NewModuleInstaller constructs a ModuleInstaller that installs modules
// into modsDir, using reg to talk to module registries. Both of the
// registry lookup caches start out empty.
func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller {
	installer := &ModuleInstaller{
		modsDir:           modsDir,
		reg:               reg,
		moduleVersions:    map[string]*response.ModuleVersions{},
		moduleVersionsUrl: map[moduleVersion]string{},
	}
	return installer
}
// InstallModules analyses the root module in the given directory and
// installs all of its direct and transitive dependencies into the given
// modules directory, which must already exist.
//
// Because installation may involve slow calls to remote services, the
// hooks interface lets the caller be notified as each module is installed
// and, for remote modules, when downloading begins. Hook calls never
// overlap, but no particular ordering is guaranteed; hooks are for UI
// feedback only and give the caller no control over the process.
//
// Modules already present in the target directory are skipped unless
// their source address or version has changed, or the upgrade flag is
// set. No directory is ever deleted except to replace an existing one
// with a freshly-extracted package.
//
// If the returned diagnostics contains errors, installation may have
// wholly or partially completed. Modules must be loaded in order to
// discover their dependencies, so this function performs many of the
// same checks as LoadConfig as a side-effect. On success (no error
// diagnostics) the first return value is the early configuration tree
// built during installation.
func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) {
	log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir)

	rootModule, diags := earlyconfig.LoadModule(rootDir)
	if rootModule == nil {
		// Without a readable root module there is nothing to install.
		return nil, diags
	}

	manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to read modules manifest file",
			fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err),
		))
		return nil, diags
	}

	// A single getter is shared across the whole walk so that repeated
	// remote sources can be reused.
	fetcher := reusingGetter{}
	config, installDiags := i.installDescendentModules(rootModule, rootDir, manifest, upgrade, hooks, fetcher)
	diags = append(diags, installDiags...)

	return config, diags
}
// installDescendentModules installs all direct and transitive child modules
// of the given root module, recording each installation in the given
// manifest and writing a manifest snapshot to disk before returning.
//
// The real work happens in the module walker callback below, which
// BuildConfig invokes once per discovered module call. The callback decides
// whether an existing installation can be reused and otherwise dispatches
// to one of three installation strategies (local path, registry, or
// go-getter) based on the shape of the module's source address.
func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if hooks == nil {
		// Use our no-op implementation as a placeholder
		hooks = ModuleInstallHooksImpl{}
	}

	// Create a manifest record for the root module. This will be used if
	// there are any relative-pathed modules in the root.
	manifest[""] = modsdir.Record{
		Key: "",
		Dir: rootDir,
	}

	cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc(
		func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
			key := manifest.ModuleKey(req.Path)
			instPath := i.packageInstallPath(req.Path)

			log.Printf("[DEBUG] Module installer: begin %s", key)

			// First we'll check if we need to upgrade/replace an existing
			// installed module, and delete it out of the way if so.
			replace := upgrade
			if !replace {
				record, recorded := manifest[key]
				switch {
				case !recorded:
					log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key)
					replace = true
				case record.SourceAddr != req.SourceAddr:
					log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr)
					replace = true
				case record.Version != nil && !req.VersionConstraints.Check(record.Version):
					log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints)
					replace = true
				}
			}

			// If we _are_ planning to replace this module, then we'll remove
			// it now so our installation code below won't conflict with any
			// existing remnants.
			if replace {
				if _, recorded := manifest[key]; recorded {
					log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key)
				}
				delete(manifest, key)
				// Deleting a module invalidates all of its descendent modules too.
				keyPrefix := key + "."
				for subKey := range manifest {
					if strings.HasPrefix(subKey, keyPrefix) {
						if _, recorded := manifest[subKey]; recorded {
							log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey)
						}
						delete(manifest, subKey)
					}
				}
			}

			record, recorded := manifest[key]
			if !recorded {
				// Clean up any stale cache directory that might be present.
				// If this is a local (relative) source then the dir will
				// not exist, but we'll ignore that.
				log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key)
				err := os.RemoveAll(instPath)
				if err != nil && !os.IsNotExist(err) {
					log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err)
					diags = diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Failed to remove local module cache",
						fmt.Sprintf(
							"Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s",
							instPath, err,
						),
					))
					return nil, nil, diags
				}
			} else {
				// If this module is already recorded and its root directory
				// exists then we will just load what's already there and
				// keep our existing record.
				info, err := os.Stat(record.Dir)
				if err == nil && info.IsDir() {
					mod, mDiags := earlyconfig.LoadModule(record.Dir)
					diags = diags.Append(mDiags)
					log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir)
					return mod, record.Version, diags
				}
			}

			// If we get down here then it's finally time to actually install
			// the module. There are some variants to this process depending
			// on what type of module source address we have.
			switch {
			case isLocalSourceAddr(req.SourceAddr):
				log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr)
				mod, mDiags := i.installLocalModule(req, key, manifest, hooks)
				diags = append(diags, mDiags...)
				return mod, nil, diags
			case isRegistrySourceAddr(req.SourceAddr):
				addr, err := regsrc.ParseModuleSource(req.SourceAddr)
				if err != nil {
					// Should never happen because isRegistrySourceAddr already validated
					panic(err)
				}
				log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr)
				mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter)
				diags = append(diags, mDiags...)
				return mod, v, diags
			default:
				log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr)
				mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter)
				diags = append(diags, mDiags...)
				return mod, nil, diags
			}
		},
	))
	diags = append(diags, cDiags...)

	// Persist the updated manifest so that later runs can detect what is
	// already installed.
	err := manifest.WriteSnapshotToDir(i.modsDir)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to update module manifest",
			fmt.Sprintf("Unable to write the module manifest file: %s", err),
		))
	}

	return cfg, diags
}
// installLocalModule "installs" a module with a relative local source path
// by loading it from the directory implied by its parent's location;
// nothing is copied on disk. The resolved location is recorded in the
// manifest under the given key, and hooks.Install reports completion.
func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	parentKey := manifest.ModuleKey(req.Parent.Path)
	parentRecord, recorded := manifest[parentKey]
	if !recorded {
		// This is indicative of a bug rather than a user-actionable error
		panic(fmt.Errorf("missing manifest record for parent module %s", parentKey))
	}

	if len(req.VersionConstraints) != 0 {
		// A version constraint is meaningless for a relative path, since
		// there is no registry from which to select a version.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid version constraint",
			fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
	}

	// For local sources we don't actually need to modify the
	// filesystem at all because the parent already wrote
	// the files we need, and so we just load up what's already here.
	newDir := filepath.Join(parentRecord.Dir, req.SourceAddr)
	log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir)
	// it is possible that the local directory is a symlink
	newDir, err := filepath.EvalSymlinks(newDir)
	if err != nil {
		// NOTE(review): on error we still fall through and attempt to load
		// newDir below, which presumably yields an additional "unreadable
		// directory" diagnostic — confirm this double-report is intended.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()),
		))
	}

	mod, mDiags := earlyconfig.LoadModule(newDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
	} else {
		diags = diags.Append(mDiags)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key:        key,
		Dir:        newDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir)
	hooks.Install(key, nil, newDir)

	return mod, diags
}
// installRegistryModule installs a module whose source address refers to a
// module registry. It lists available versions (consulting and populating
// the moduleVersions cache), selects the newest version matching the
// request's constraints, asks the registry for a download URL (cached in
// moduleVersionsUrl), downloads the package with go-getter into instPath,
// and finally loads and records the resulting module.
func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	hostname, err := addr.SvcHost()
	if err != nil {
		// If it looks like the user was trying to use punycode then we'll generate
		// a specialized error for that case. We require the unicode form of
		// hostname so that hostnames are always human-readable in configuration
		// and punycode can't be used to hide a malicious module hostname.
		if strings.HasPrefix(addr.RawHost.Raw, "xn--") {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Invalid module registry hostname",
				fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
		} else {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Invalid module registry hostname",
				fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
		}
		return nil, nil, diags
	}

	reg := i.reg

	var resp *response.ModuleVersions
	var exists bool

	// check if we've already looked up this module from the registry
	if resp, exists = i.moduleVersions[addr.String()]; exists {
		log.Printf("[TRACE] %s using already found available versions of %s at %s", key, addr, hostname)
	} else {
		log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname)
		resp, err = reg.ModuleVersions(addr)
		if err != nil {
			if registry.IsModuleNotFound(err) {
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Module not found",
					fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname),
				))
			} else {
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Error accessing remote module registry",
					fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err),
				))
			}
			return nil, nil, diags
		}
		// Remember the response so later installs of the same module can
		// skip the network round-trip.
		i.moduleVersions[addr.String()] = resp
	}

	// The response might contain information about dependencies to allow us
	// to potentially optimize future requests, but we don't currently do that
	// and so for now we'll just take the first item which is guaranteed to
	// be the address we requested.
	if len(resp.Modules) < 1 {
		// Should never happen, but since this is a remote service that may
		// be implemented by third-parties we will handle it gracefully.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid response from remote module registry",
			fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
		return nil, nil, diags
	}

	modMeta := resp.Modules[0]

	// Scan the advertised versions for both the newest overall version
	// (for error messages) and the newest version matching the caller's
	// constraints (the one we'll install).
	var latestMatch *version.Version
	var latestVersion *version.Version
	for _, mv := range modMeta.Versions {
		v, err := version.NewVersion(mv.Version)
		if err != nil {
			// Should never happen if the registry server is compliant with
			// the protocol, but we'll warn if not to assist someone who
			// might be developing a module registry server.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Warning,
				"Invalid response from remote module registry",
				fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
			continue
		}

		// If we've found a pre-release version then we'll ignore it unless
		// it was exactly requested.
		if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() {
			log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v)
			continue
		}

		if latestVersion == nil || v.GreaterThan(latestVersion) {
			latestVersion = v
		}

		if req.VersionConstraints.Check(v) {
			if latestMatch == nil || v.GreaterThan(latestMatch) {
				latestMatch = v
			}
		}
	}

	if latestVersion == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Module has no versions",
			fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname),
		))
		return nil, nil, diags
	}

	if latestMatch == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unresolvable module version constraint",
			fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion),
		))
		return nil, nil, diags
	}

	// Report up to the caller that we're about to start downloading.
	packageAddr, _ := splitAddrSubdir(req.SourceAddr)
	hooks.Download(key, packageAddr, latestMatch)

	// If we manage to get down here then we've found a suitable version to
	// install, so we need to ask the registry where we should download it from.
	// The response to this is a go-getter-style address string.

	// first check the cache for the download URL
	moduleAddr := moduleVersion{module: addr.String(), version: latestMatch.String()}
	if _, exists := i.moduleVersionsUrl[moduleAddr]; !exists {
		url, err := reg.ModuleLocation(addr, latestMatch.String())
		if err != nil {
			log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err)
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Invalid response from remote module registry",
				fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch),
			))
			return nil, nil, diags
		}
		i.moduleVersionsUrl[moduleVersion{module: addr.String(), version: latestMatch.String()}] = url
	}

	dlAddr := i.moduleVersionsUrl[moduleAddr]

	log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr)

	modDir, err := getter.getWithGoGetter(instPath, dlAddr)
	if err != nil {
		// Errors returned by go-getter have very inconsistent quality as
		// end-user error messages, but for now we're accepting that because
		// we have no way to recognize any specific errors to improve them
		// and masking the error entirely would hide valuable diagnostic
		// information from the user.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to download module",
			fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err),
		))
		return nil, nil, diags
	}

	log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir)

	if addr.RawSubmodule != "" {
		// Append the user's requested subdirectory to any subdirectory that
		// was implied by any of the nested layers we expanded within go-getter.
		modDir = filepath.Join(modDir, addr.RawSubmodule)
	}

	log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir)

	// Finally we are ready to try actually loading the module.
	mod, mDiags := earlyconfig.LoadModule(modDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here. For registry modules this actually
		// indicates a bug in the code above, since it's not the
		// user's responsibility to create the directory in this case.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
		))
	} else {
		diags = append(diags, mDiags...)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key:        key,
		Version:    latestMatch,
		Dir:        modDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
	hooks.Install(key, latestMatch, modDir)

	return mod, latestMatch, diags
}
// installGoGetterModule installs a module whose source address is neither
// a relative local path nor a registry address, by handing the address
// directly to go-getter for download into instPath. Version constraints
// are rejected since there is no registry to resolve them against. The
// downloaded location is recorded in the manifest under the given key.
func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Report up to the caller that we're about to start downloading.
	packageAddr, _ := splitAddrSubdir(req.SourceAddr)
	hooks.Download(key, packageAddr, nil)

	if len(req.VersionConstraints) != 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid version constraint",
			fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
		return nil, diags
	}

	modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr)
	if err != nil {
		if _, ok := err.(*MaybeRelativePathErr); ok {
			// The address failed to resolve but looks like it may have been
			// intended as a relative path; suggest the "./" form instead.
			log.Printf(
				"[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../",
				req.SourceAddr,
			)
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Module not found",
				fmt.Sprintf(
					"The module address %q could not be resolved.\n\n"+
						"If you intended this as a path relative to the current "+
						"module, use \"./%s\" instead. The \"./\" prefix "+
						"indicates that the address is a relative filesystem path.",
					req.SourceAddr, req.SourceAddr,
				),
			))
		} else {
			// Errors returned by go-getter have very inconsistent quality as
			// end-user error messages, but for now we're accepting that because
			// we have no way to recognize any specific errors to improve them
			// and masking the error entirely would hide valuable diagnostic
			// information from the user.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to download module",
				fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err),
			))
		}
		return nil, diags
	}

	log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir)

	mod, mDiags := earlyconfig.LoadModule(modDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here. For go-getter modules this actually
		// indicates a bug in the code above, since it's not the
		// user's responsibility to create the directory in this case.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
		))
	} else {
		diags = append(diags, mDiags...)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key:        key,
		Dir:        modDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
	hooks.Install(key, nil, modDir)

	return mod, diags
}
// packageInstallPath returns the directory under the modules directory
// where the package for the given module path should be installed, using
// the dot-joined module path as the directory name.
func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string {
	dirName := strings.Join(modulePath, ".")
	return filepath.Join(i.modsDir, dirName)
}
| apache-2.0 |
libra/libra | language/diem-tools/writeset-transaction-generator/src/admin_script_builder.rs | 2656 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use diem_framework::compile_script;
use diem_types::{
account_address::AccountAddress,
account_config::diem_root_address,
transaction::{Script, WriteSetPayload},
};
use handlebars::Handlebars;
use serde::Serialize;
use std::{collections::HashMap, io::Write, path::PathBuf};
use tempfile::NamedTempFile;
/// The relative path to the scripts templates
pub const SCRIPTS_DIR_PATH: &str = "templates";
/// Compiles the given Move source text into a transaction `Script` with no
/// type arguments and no arguments, by writing the source to a temporary
/// file and invoking the framework compiler on that file's path.
fn compile_admin_script(input: &str) -> Result<Script> {
    let mut source_file = NamedTempFile::new()?;
    source_file.write_all(input.as_bytes())?;
    let source_path = source_file.path().to_str().unwrap().to_owned();
    let compiled = compile_script(source_path);
    Ok(Script::new(compiled, vec![], vec![]))
}
/// Returns the path to the script templates directory, resolved relative
/// to this crate's manifest directory (fixed at compile time via
/// `CARGO_MANIFEST_DIR`).
pub fn template_path() -> PathBuf {
    let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    // `push` accepts any `AsRef<Path>`, so no intermediate String
    // allocation is needed here.
    path.push(SCRIPTS_DIR_PATH);
    path
}
/// Builds a `WriteSetPayload` that removes the given validators from the
/// validator set, executed as the Diem root account.
///
/// Panics if `validators` is empty, or if the template cannot be
/// registered, rendered, or compiled.
pub fn encode_remove_validators_payload(validators: Vec<AccountAddress>) -> WriteSetPayload {
    assert!(!validators.is_empty(), "Unexpected validator set length");

    let mut template = template_path();
    template.push("remove_validators.move");

    let script = {
        let mut handlebars = Handlebars::new();
        handlebars.set_strict_mode(true);
        handlebars.register_template_file("script", template).unwrap();

        let mut data = HashMap::new();
        data.insert("addresses", validators);
        let rendered = handlebars.render("script", &data).unwrap();
        compile_admin_script(rendered.as_str()).unwrap()
    };

    WriteSetPayload::Script {
        script,
        execute_as: diem_root_address(),
    }
}
/// Builds a `WriteSetPayload` from an arbitrary named template in the
/// templates directory, rendered with the given serializable `args`.
///
/// The payload executes as `execute_as` when given, otherwise as the Diem
/// root account. Panics if the template cannot be registered, rendered,
/// or compiled.
pub fn encode_custom_script<T: Serialize>(
    script_name_in_templates: &str,
    args: &T,
    execute_as: Option<AccountAddress>,
) -> WriteSetPayload {
    let mut template = template_path();
    template.push(script_name_in_templates);

    let script = {
        let mut handlebars = Handlebars::new();
        handlebars.register_template_file("script", template).unwrap();
        handlebars.set_strict_mode(true);
        let rendered = handlebars.render("script", args).unwrap();
        compile_admin_script(rendered.as_str()).unwrap()
    };

    WriteSetPayload::Script {
        script,
        execute_as: execute_as.unwrap_or_else(diem_root_address),
    }
}
/// Builds a `WriteSetPayload` that halts transaction processing on the
/// network, compiled from the `halt_transactions.move` template and
/// executed as the Diem root account.
pub fn encode_halt_network_payload() -> WriteSetPayload {
    let mut template = template_path();
    template.push("halt_transactions.move");
    let compiled = compile_script(template.to_str().unwrap().to_owned());

    WriteSetPayload::Script {
        script: Script::new(compiled, vec![], vec![]),
        execute_as: diem_root_address(),
    }
}
| apache-2.0 |
timaar/Tiimspot | src/main/java/com/timaar/tiimspot/config/WebConfigurer.java | 6238 | package com.timaar.tiimspot.config;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.servlet.InstrumentedFilter;
import com.codahale.metrics.servlets.MetricsServlet;
import com.timaar.tiimspot.web.filter.CachingHttpHeadersFilter;
import com.timaar.tiimspot.web.filter.StaticResourcesProductionFilter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.context.embedded.ConfigurableEmbeddedServletContainer;
import org.springframework.boot.context.embedded.EmbeddedServletContainerCustomizer;
import org.springframework.boot.context.embedded.MimeMappings;
import org.springframework.boot.context.embedded.ServletContextInitializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.web.cors.CorsConfiguration;
import org.springframework.web.cors.UrlBasedCorsConfigurationSource;
import org.springframework.web.filter.CorsFilter;
import java.util.*;
import javax.inject.Inject;
import javax.servlet.*;
/**
 * Configuration of web application with Servlet 3.0 APIs.
 *
 * <p>Registers servlet filters and servlets (metrics, caching headers,
 * static resources) depending on the active Spring profiles, customizes
 * the embedded servlet container's MIME mappings, and exposes a CORS
 * filter bean driven by application properties.
 */
@Configuration
@AutoConfigureAfter(CacheConfiguration.class)
public class WebConfigurer implements ServletContextInitializer, EmbeddedServletContainerCustomizer {

    private final Logger log = LoggerFactory.getLogger(WebConfigurer.class);

    @Inject
    private Environment env;

    @Inject
    private JHipsterProperties props;

    // Optional: only present when metrics are enabled in the context.
    @Autowired(required = false)
    private MetricRegistry metricRegistry;

    /**
     * Registers profile-dependent filters and servlets on startup:
     * metrics instrumentation (unless the "fast" profile is active), and
     * caching-header plus static-resource filters in production.
     */
    @Override
    public void onStartup(ServletContext servletContext) throws ServletException {
        log.info("Web application configuration, using profiles: {}", Arrays.toString(env.getActiveProfiles()));
        EnumSet<DispatcherType> disps = EnumSet.of(DispatcherType.REQUEST, DispatcherType.FORWARD, DispatcherType.ASYNC);
        if (!env.acceptsProfiles(Constants.SPRING_PROFILE_FAST)) {
            initMetrics(servletContext, disps);
        }
        if (env.acceptsProfiles(Constants.SPRING_PROFILE_PRODUCTION)) {
            initCachingHttpHeadersFilter(servletContext, disps);
            initStaticResourcesProductionFilter(servletContext, disps);
        }
        log.info("Web application fully configured");
    }

    /**
     * Set up Mime types.
     */
    @Override
    public void customize(ConfigurableEmbeddedServletContainer container) {
        MimeMappings mappings = new MimeMappings(MimeMappings.DEFAULT);
        // IE issue, see https://github.com/jhipster/generator-jhipster/pull/711
        mappings.add("html", "text/html;charset=utf-8");
        // CloudFoundry issue, see https://github.com/cloudfoundry/gorouter/issues/64
        mappings.add("json", "text/html;charset=utf-8");
        container.setMimeMappings(mappings);
    }

    /**
     * Initializes the static resources production Filter.
     */
    private void initStaticResourcesProductionFilter(ServletContext servletContext,
                                                     EnumSet<DispatcherType> disps) {
        log.debug("Registering static resources production Filter");
        FilterRegistration.Dynamic staticResourcesProductionFilter =
            servletContext.addFilter("staticResourcesProductionFilter",
                new StaticResourcesProductionFilter());
        staticResourcesProductionFilter.addMappingForUrlPatterns(disps, true, "/");
        staticResourcesProductionFilter.addMappingForUrlPatterns(disps, true, "/index.html");
        staticResourcesProductionFilter.addMappingForUrlPatterns(disps, true, "/assets/*");
        staticResourcesProductionFilter.addMappingForUrlPatterns(disps, true, "/scripts/*");
        staticResourcesProductionFilter.setAsyncSupported(true);
    }

    /**
     * Initializes the caching HTTP Headers Filter.
     */
    private void initCachingHttpHeadersFilter(ServletContext servletContext,
                                              EnumSet<DispatcherType> disps) {
        log.debug("Registering Caching HTTP Headers Filter");
        FilterRegistration.Dynamic cachingHttpHeadersFilter =
            servletContext.addFilter("cachingHttpHeadersFilter",
                new CachingHttpHeadersFilter(env));
        cachingHttpHeadersFilter.addMappingForUrlPatterns(disps, true, "/dist/assets/*");
        cachingHttpHeadersFilter.addMappingForUrlPatterns(disps, true, "/dist/scripts/*");
        cachingHttpHeadersFilter.setAsyncSupported(true);
    }

    /**
     * Initializes Metrics.
     */
    private void initMetrics(ServletContext servletContext, EnumSet<DispatcherType> disps) {
        log.debug("Initializing Metrics registries");
        // Expose the registry to the instrumented filter and servlet via
        // their well-known servlet-context attribute names.
        servletContext.setAttribute(InstrumentedFilter.REGISTRY_ATTRIBUTE,
            metricRegistry);
        servletContext.setAttribute(MetricsServlet.METRICS_REGISTRY,
            metricRegistry);

        log.debug("Registering Metrics Filter");
        FilterRegistration.Dynamic metricsFilter = servletContext.addFilter("webappMetricsFilter",
            new InstrumentedFilter());
        metricsFilter.addMappingForUrlPatterns(disps, true, "/*");
        metricsFilter.setAsyncSupported(true);

        log.debug("Registering Metrics Servlet");
        ServletRegistration.Dynamic metricsAdminServlet =
            servletContext.addServlet("metricsServlet", new MetricsServlet());
        metricsAdminServlet.addMapping("/metrics/metrics/*");
        metricsAdminServlet.setAsyncSupported(true);
        metricsAdminServlet.setLoadOnStartup(2);
    }

    /**
     * Builds the CORS filter bean from the configured CORS properties,
     * applying the configuration to the API, API docs, and OAuth paths
     * only when at least one allowed origin is configured.
     */
    @Bean
    public CorsFilter corsFilter() {
        UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
        CorsConfiguration config = props.getCors();
        if (config.getAllowedOrigins() != null && !config.getAllowedOrigins().isEmpty()) {
            source.registerCorsConfiguration("/api/**", config);
            source.registerCorsConfiguration("/v2/api-docs", config);
            source.registerCorsConfiguration("/oauth/**", config);
        }
        return new CorsFilter(source);
    }
}
| apache-2.0 |
google/jax | jax/scipy/linalg.py | 1140 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa: F401
from jax._src.scipy.linalg import (
block_diag as block_diag,
cholesky as cholesky,
cho_factor as cho_factor,
cho_solve as cho_solve,
det as det,
eigh as eigh,
eigh_tridiagonal as eigh_tridiagonal,
expm as expm,
expm_frechet as expm_frechet,
inv as inv,
lu as lu,
lu_factor as lu_factor,
lu_solve as lu_solve,
polar as polar,
qr as qr,
solve as solve,
solve_triangular as solve_triangular,
svd as svd,
tril as tril,
triu as triu,
)
from jax._src.lax.polar import (
polar_unitary as polar_unitary,
)
| apache-2.0 |
vam-google/google-cloud-java | google-api-grpc/proto-google-cloud-monitoring-v3/src/main/java/com/google/monitoring/v3/ListMetricDescriptorsRequest.java | 33401 | // Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/monitoring/v3/metric_service.proto
package com.google.monitoring.v3;
/**
*
*
* <pre>
* The `ListMetricDescriptors` request.
* </pre>
*
* Protobuf type {@code google.monitoring.v3.ListMetricDescriptorsRequest}
*/
public final class ListMetricDescriptorsRequest extends com.google.protobuf.GeneratedMessageV3
implements
// @@protoc_insertion_point(message_implements:google.monitoring.v3.ListMetricDescriptorsRequest)
ListMetricDescriptorsRequestOrBuilder {
private static final long serialVersionUID = 0L;
// Use ListMetricDescriptorsRequest.newBuilder() to construct.
private ListMetricDescriptorsRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ListMetricDescriptorsRequest() {
name_ = "";
filter_ = "";
pageSize_ = 0;
pageToken_ = "";
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
return this.unknownFields;
}
private ListMetricDescriptorsRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 18:
{
java.lang.String s = input.readStringRequireUtf8();
filter_ = s;
break;
}
case 24:
{
pageSize_ = input.readInt32();
break;
}
case 34:
{
java.lang.String s = input.readStringRequireUtf8();
pageToken_ = s;
break;
}
case 42:
{
java.lang.String s = input.readStringRequireUtf8();
name_ = s;
break;
}
default:
{
if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.monitoring.v3.MetricServiceProto
.internal_static_google_monitoring_v3_ListMetricDescriptorsRequest_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.monitoring.v3.MetricServiceProto
.internal_static_google_monitoring_v3_ListMetricDescriptorsRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.monitoring.v3.ListMetricDescriptorsRequest.class,
com.google.monitoring.v3.ListMetricDescriptorsRequest.Builder.class);
}
public static final int NAME_FIELD_NUMBER = 5;
private volatile java.lang.Object name_;
/**
*
*
* <pre>
* The project on which to execute the request. The format is
* `"projects/{project_id_or_number}"`.
* </pre>
*
* <code>string name = 5;</code>
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
name_ = s;
return s;
}
}
/**
*
*
* <pre>
* The project on which to execute the request. The format is
* `"projects/{project_id_or_number}"`.
* </pre>
*
* <code>string name = 5;</code>
*/
public com.google.protobuf.ByteString getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int FILTER_FIELD_NUMBER = 2;
private volatile java.lang.Object filter_;
/**
*
*
* <pre>
* If this field is empty, all custom and
* system-defined metric descriptors are returned.
* Otherwise, the [filter](/monitoring/api/v3/filters)
* specifies which metric descriptors are to be
* returned. For example, the following filter matches all
* [custom metrics](/monitoring/custom-metrics):
* metric.type = starts_with("custom.googleapis.com/")
* </pre>
*
* <code>string filter = 2;</code>
*/
public java.lang.String getFilter() {
java.lang.Object ref = filter_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
filter_ = s;
return s;
}
}
/**
*
*
* <pre>
* If this field is empty, all custom and
* system-defined metric descriptors are returned.
* Otherwise, the [filter](/monitoring/api/v3/filters)
* specifies which metric descriptors are to be
* returned. For example, the following filter matches all
* [custom metrics](/monitoring/custom-metrics):
* metric.type = starts_with("custom.googleapis.com/")
* </pre>
*
* <code>string filter = 2;</code>
*/
public com.google.protobuf.ByteString getFilterBytes() {
java.lang.Object ref = filter_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
filter_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int PAGE_SIZE_FIELD_NUMBER = 3;
private int pageSize_;
/**
*
*
* <pre>
* A positive number that is the maximum number of results to return.
* </pre>
*
* <code>int32 page_size = 3;</code>
*/
public int getPageSize() {
return pageSize_;
}
public static final int PAGE_TOKEN_FIELD_NUMBER = 4;
private volatile java.lang.Object pageToken_;
/**
*
*
* <pre>
* If this field is not empty then it must contain the `nextPageToken` value
* returned by a previous call to this method. Using this field causes the
* method to return additional results from the previous method call.
* </pre>
*
* <code>string page_token = 4;</code>
*/
public java.lang.String getPageToken() {
java.lang.Object ref = pageToken_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
pageToken_ = s;
return s;
}
}
/**
*
*
* <pre>
* If this field is not empty then it must contain the `nextPageToken` value
* returned by a previous call to this method. Using this field causes the
* method to return additional results from the previous method call.
* </pre>
*
* <code>string page_token = 4;</code>
*/
public com.google.protobuf.ByteString getPageTokenBytes() {
java.lang.Object ref = pageToken_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
pageToken_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
if (!getFilterBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, filter_);
}
if (pageSize_ != 0) {
output.writeInt32(3, pageSize_);
}
if (!getPageTokenBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 4, pageToken_);
}
if (!getNameBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 5, name_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!getFilterBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, filter_);
}
if (pageSize_ != 0) {
size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_);
}
if (!getPageTokenBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, pageToken_);
}
if (!getNameBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, name_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.monitoring.v3.ListMetricDescriptorsRequest)) {
return super.equals(obj);
}
com.google.monitoring.v3.ListMetricDescriptorsRequest other =
(com.google.monitoring.v3.ListMetricDescriptorsRequest) obj;
boolean result = true;
result = result && getName().equals(other.getName());
result = result && getFilter().equals(other.getFilter());
result = result && (getPageSize() == other.getPageSize());
result = result && getPageToken().equals(other.getPageToken());
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
hash = (37 * hash) + FILTER_FIELD_NUMBER;
hash = (53 * hash) + getFilter().hashCode();
hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER;
hash = (53 * hash) + getPageSize();
hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getPageToken().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseDelimitedFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseDelimitedFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
com.google.protobuf.CodedInputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(
com.google.monitoring.v3.ListMetricDescriptorsRequest prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
*
* <pre>
* The `ListMetricDescriptors` request.
* </pre>
*
* Protobuf type {@code google.monitoring.v3.ListMetricDescriptorsRequest}
*/
public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
implements
// @@protoc_insertion_point(builder_implements:google.monitoring.v3.ListMetricDescriptorsRequest)
com.google.monitoring.v3.ListMetricDescriptorsRequestOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.monitoring.v3.MetricServiceProto
.internal_static_google_monitoring_v3_ListMetricDescriptorsRequest_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.monitoring.v3.MetricServiceProto
.internal_static_google_monitoring_v3_ListMetricDescriptorsRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.monitoring.v3.ListMetricDescriptorsRequest.class,
com.google.monitoring.v3.ListMetricDescriptorsRequest.Builder.class);
}
// Construct using com.google.monitoring.v3.ListMetricDescriptorsRequest.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
}
@java.lang.Override
public Builder clear() {
super.clear();
name_ = "";
filter_ = "";
pageSize_ = 0;
pageToken_ = "";
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return com.google.monitoring.v3.MetricServiceProto
.internal_static_google_monitoring_v3_ListMetricDescriptorsRequest_descriptor;
}
@java.lang.Override
public com.google.monitoring.v3.ListMetricDescriptorsRequest getDefaultInstanceForType() {
return com.google.monitoring.v3.ListMetricDescriptorsRequest.getDefaultInstance();
}
@java.lang.Override
public com.google.monitoring.v3.ListMetricDescriptorsRequest build() {
com.google.monitoring.v3.ListMetricDescriptorsRequest result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public com.google.monitoring.v3.ListMetricDescriptorsRequest buildPartial() {
com.google.monitoring.v3.ListMetricDescriptorsRequest result =
new com.google.monitoring.v3.ListMetricDescriptorsRequest(this);
result.name_ = name_;
result.filter_ = filter_;
result.pageSize_ = pageSize_;
result.pageToken_ = pageToken_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.google.monitoring.v3.ListMetricDescriptorsRequest) {
return mergeFrom((com.google.monitoring.v3.ListMetricDescriptorsRequest) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(com.google.monitoring.v3.ListMetricDescriptorsRequest other) {
if (other == com.google.monitoring.v3.ListMetricDescriptorsRequest.getDefaultInstance())
return this;
if (!other.getName().isEmpty()) {
name_ = other.name_;
onChanged();
}
if (!other.getFilter().isEmpty()) {
filter_ = other.filter_;
onChanged();
}
if (other.getPageSize() != 0) {
setPageSize(other.getPageSize());
}
if (!other.getPageToken().isEmpty()) {
pageToken_ = other.pageToken_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.monitoring.v3.ListMetricDescriptorsRequest parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage =
(com.google.monitoring.v3.ListMetricDescriptorsRequest) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private java.lang.Object name_ = "";
/**
*
*
* <pre>
* The project on which to execute the request. The format is
* `"projects/{project_id_or_number}"`.
* </pre>
*
* <code>string name = 5;</code>
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
name_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
*
* <pre>
* The project on which to execute the request. The format is
* `"projects/{project_id_or_number}"`.
* </pre>
*
* <code>string name = 5;</code>
*/
public com.google.protobuf.ByteString getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
*
* <pre>
* The project on which to execute the request. The format is
* `"projects/{project_id_or_number}"`.
* </pre>
*
* <code>string name = 5;</code>
*/
public Builder setName(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
name_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* The project on which to execute the request. The format is
* `"projects/{project_id_or_number}"`.
* </pre>
*
* <code>string name = 5;</code>
*/
public Builder clearName() {
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
/**
*
*
* <pre>
* The project on which to execute the request. The format is
* `"projects/{project_id_or_number}"`.
* </pre>
*
* <code>string name = 5;</code>
*/
public Builder setNameBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
name_ = value;
onChanged();
return this;
}
private java.lang.Object filter_ = "";
/**
*
*
* <pre>
* If this field is empty, all custom and
* system-defined metric descriptors are returned.
* Otherwise, the [filter](/monitoring/api/v3/filters)
* specifies which metric descriptors are to be
* returned. For example, the following filter matches all
* [custom metrics](/monitoring/custom-metrics):
* metric.type = starts_with("custom.googleapis.com/")
* </pre>
*
* <code>string filter = 2;</code>
*/
public java.lang.String getFilter() {
java.lang.Object ref = filter_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
filter_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
*
* <pre>
* If this field is empty, all custom and
* system-defined metric descriptors are returned.
* Otherwise, the [filter](/monitoring/api/v3/filters)
* specifies which metric descriptors are to be
* returned. For example, the following filter matches all
* [custom metrics](/monitoring/custom-metrics):
* metric.type = starts_with("custom.googleapis.com/")
* </pre>
*
* <code>string filter = 2;</code>
*/
public com.google.protobuf.ByteString getFilterBytes() {
java.lang.Object ref = filter_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
filter_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
*
* <pre>
* If this field is empty, all custom and
* system-defined metric descriptors are returned.
* Otherwise, the [filter](/monitoring/api/v3/filters)
* specifies which metric descriptors are to be
* returned. For example, the following filter matches all
* [custom metrics](/monitoring/custom-metrics):
* metric.type = starts_with("custom.googleapis.com/")
* </pre>
*
* <code>string filter = 2;</code>
*/
public Builder setFilter(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
filter_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* If this field is empty, all custom and
* system-defined metric descriptors are returned.
* Otherwise, the [filter](/monitoring/api/v3/filters)
* specifies which metric descriptors are to be
* returned. For example, the following filter matches all
* [custom metrics](/monitoring/custom-metrics):
* metric.type = starts_with("custom.googleapis.com/")
* </pre>
*
* <code>string filter = 2;</code>
*/
public Builder clearFilter() {
filter_ = getDefaultInstance().getFilter();
onChanged();
return this;
}
/**
*
*
* <pre>
* If this field is empty, all custom and
* system-defined metric descriptors are returned.
* Otherwise, the [filter](/monitoring/api/v3/filters)
* specifies which metric descriptors are to be
* returned. For example, the following filter matches all
* [custom metrics](/monitoring/custom-metrics):
* metric.type = starts_with("custom.googleapis.com/")
* </pre>
*
* <code>string filter = 2;</code>
*/
public Builder setFilterBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
filter_ = value;
onChanged();
return this;
}
private int pageSize_;
/**
*
*
* <pre>
* A positive number that is the maximum number of results to return.
* </pre>
*
* <code>int32 page_size = 3;</code>
*/
public int getPageSize() {
return pageSize_;
}
/**
*
*
* <pre>
* A positive number that is the maximum number of results to return.
* </pre>
*
* <code>int32 page_size = 3;</code>
*/
public Builder setPageSize(int value) {
pageSize_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* A positive number that is the maximum number of results to return.
* </pre>
*
* <code>int32 page_size = 3;</code>
*/
public Builder clearPageSize() {
pageSize_ = 0;
onChanged();
return this;
}
private java.lang.Object pageToken_ = "";
/**
*
*
* <pre>
* If this field is not empty then it must contain the `nextPageToken` value
* returned by a previous call to this method. Using this field causes the
* method to return additional results from the previous method call.
* </pre>
*
* <code>string page_token = 4;</code>
*/
public java.lang.String getPageToken() {
java.lang.Object ref = pageToken_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
pageToken_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
*
* <pre>
* If this field is not empty then it must contain the `nextPageToken` value
* returned by a previous call to this method. Using this field causes the
* method to return additional results from the previous method call.
* </pre>
*
* <code>string page_token = 4;</code>
*/
public com.google.protobuf.ByteString getPageTokenBytes() {
java.lang.Object ref = pageToken_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
pageToken_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
*
* <pre>
* If this field is not empty then it must contain the `nextPageToken` value
* returned by a previous call to this method. Using this field causes the
* method to return additional results from the previous method call.
* </pre>
*
* <code>string page_token = 4;</code>
*/
public Builder setPageToken(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
pageToken_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* If this field is not empty then it must contain the `nextPageToken` value
* returned by a previous call to this method. Using this field causes the
* method to return additional results from the previous method call.
* </pre>
*
* <code>string page_token = 4;</code>
*/
public Builder clearPageToken() {
pageToken_ = getDefaultInstance().getPageToken();
onChanged();
return this;
}
/**
*
*
* <pre>
* If this field is not empty then it must contain the `nextPageToken` value
* returned by a previous call to this method. Using this field causes the
* method to return additional results from the previous method call.
* </pre>
*
* <code>string page_token = 4;</code>
*/
public Builder setPageTokenBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
pageToken_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:google.monitoring.v3.ListMetricDescriptorsRequest)
}
// @@protoc_insertion_point(class_scope:google.monitoring.v3.ListMetricDescriptorsRequest)
private static final com.google.monitoring.v3.ListMetricDescriptorsRequest DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new com.google.monitoring.v3.ListMetricDescriptorsRequest();
}
public static com.google.monitoring.v3.ListMetricDescriptorsRequest getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<ListMetricDescriptorsRequest> PARSER =
new com.google.protobuf.AbstractParser<ListMetricDescriptorsRequest>() {
@java.lang.Override
public ListMetricDescriptorsRequest parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ListMetricDescriptorsRequest(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<ListMetricDescriptorsRequest> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<ListMetricDescriptorsRequest> getParserForType() {
return PARSER;
}
@java.lang.Override
public com.google.monitoring.v3.ListMetricDescriptorsRequest getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
| apache-2.0 |
InspirarDigital/Android-aulas | Aula3/app/src/main/java/com/atilabraga/aula3/ui/MainActivity.java | 4467 | package com.atilabraga.aula3.ui;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.ImageView;
import android.widget.Spinner;
import android.widget.TextView;
import com.atilabraga.aula3.R;
import com.atilabraga.aula3.WeatherCallback;
import com.atilabraga.aula3.model.City;
import com.atilabraga.aula3.network.WeatherApi;
import com.atilabraga.aula3.network.WeatherAsync;
import com.bumptech.glide.Glide;
import com.google.gson.Gson;
import java.util.ArrayList;
import java.util.HashMap;
import butterknife.Bind;
import butterknife.ButterKnife;
import retrofit.Call;
import retrofit.Callback;
import retrofit.Response;
import retrofit.Retrofit;
public class MainActivity extends AppCompatActivity implements WeatherCallback {
@Bind(R.id.main_image)
ImageView ivImage;
@Bind(R.id.main_cities)
Spinner spnCity;
@Bind(R.id.main_tv_1)
TextView tvName;
@Bind(R.id.main_tv_2)
TextView tvVisibility;
@Bind(R.id.main_tv_3)
TextView tvLatitude;
@Bind(R.id.main_tv_4)
TextView tvLongitude;
private WeatherAsync mTask;
private HashMap<Integer, City> mCityMap;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
ButterKnife.bind(this);
mCityMap = new HashMap<>();
mCityMap.put(0, new City(getString(R.string.api_sapucaia), "Sapucaia"));
mCityMap.put(1, new City(getString(R.string.api_rio_de_janeiro), "Rio de Janeiro"));
mCityMap.put(2, new City(getString(R.string.api_belo_horizonte), "Belo Horizonte"));
mCityMap.put(3, new City(getString(R.string.api_juiz_de_fora), "Juiz de Fora"));
mCityMap.put(4, new City(getString(R.string.api_maceio), "Maceió"));
mCityMap.put(5, new City(getString(R.string.api_gracas), "Graças"));
ArrayList<City> cityList = new ArrayList(mCityMap.values());
// ArrayList<String> cityNameList = new ArrayList<>();
// for (City city : cityList) {
// cityNameList.add(city.getName());
// }
// ArrayAdapter adapter = new ArrayAdapter<>(this, android.R.layout.simple_spinner_dropdown_item,
// cityNameList);
ArrayAdapter<CharSequence> adapter = ArrayAdapter.createFromResource(this, R.array.cities,
android.R.layout.simple_spinner_item);
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
spnCity.setAdapter(adapter);
spnCity.post(new Runnable() {
@Override
public void run() {
spnCity.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> adapterView, View view, final int i, long l) {
City city = mCityMap.get(i);
// mTask = new WeatherAsync(MainActivity.this, MainActivity.this);
// mTask.execute(city.getId());
Call<City> callback = WeatherApi.getInstance().getCityInfoById(city.getId());
callback.enqueue(new Callback<City>() {
@Override
public void onResponse(Response<City> response, Retrofit retrofit) {
showInfo(i, response.body());
}
@Override
public void onFailure(Throwable t) {
}
});
}
@Override
public void onNothingSelected(AdapterView<?> adapterView) {
}
});
}
});
}
@Override
public void onFinish(String json) {
City city = new Gson().fromJson(json, City.class);
// showInfo(city);
}
private void showInfo(int i, City city) {
Glide.with(this).load("http://lorempixel.com/400/200/city/" + i).into(ivImage);
tvName.setText(city.getName());
tvVisibility.setText(String.valueOf(city.getVisibility()));
tvLatitude.setText(city.getCoordinate().getLatitude());
tvLongitude.setText(city.getCoordinate().getLongitude());
}
}
| apache-2.0 |
Banno/sbt-plantuml-plugin | src/main/java/net/sourceforge/plantuml/classdiagram/ClassDiagramFactory.java | 5902 | /* ========================================================================
* PlantUML : a free UML diagram generator
* ========================================================================
*
* (C) Copyright 2009-2017, Arnaud Roques
*
* Project Info: http://plantuml.com
*
* This file is part of PlantUML.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* Original Author: Arnaud Roques
*/
package net.sourceforge.plantuml.classdiagram;
import java.util.ArrayList;
import java.util.List;
import net.sourceforge.plantuml.UmlDiagramType;
import net.sourceforge.plantuml.classdiagram.command.CommandAddMethod;
import net.sourceforge.plantuml.classdiagram.command.CommandAllowMixing;
import net.sourceforge.plantuml.classdiagram.command.CommandCreateClass;
import net.sourceforge.plantuml.classdiagram.command.CommandCreateClassMultilines;
import net.sourceforge.plantuml.classdiagram.command.CommandCreateElementFull2;
import net.sourceforge.plantuml.classdiagram.command.CommandCreateElementFull2.Mode;
import net.sourceforge.plantuml.classdiagram.command.CommandDiamondAssociation;
import net.sourceforge.plantuml.classdiagram.command.CommandHideShowSpecificClass;
import net.sourceforge.plantuml.classdiagram.command.CommandHideShowSpecificStereotype;
import net.sourceforge.plantuml.classdiagram.command.CommandImport;
import net.sourceforge.plantuml.classdiagram.command.CommandLayoutNewLine;
import net.sourceforge.plantuml.classdiagram.command.CommandLinkClass;
import net.sourceforge.plantuml.classdiagram.command.CommandLinkLollipop;
import net.sourceforge.plantuml.classdiagram.command.CommandNamespaceSeparator;
import net.sourceforge.plantuml.classdiagram.command.CommandStereotype;
import net.sourceforge.plantuml.classdiagram.command.CommandUrl;
import net.sourceforge.plantuml.command.Command;
import net.sourceforge.plantuml.command.CommandEndPackage;
import net.sourceforge.plantuml.command.CommandFootboxIgnored;
import net.sourceforge.plantuml.command.CommandNamespace;
import net.sourceforge.plantuml.command.CommandPackage;
import net.sourceforge.plantuml.command.CommandPackageEmpty;
import net.sourceforge.plantuml.command.CommandPage;
import net.sourceforge.plantuml.command.CommandRankDir;
import net.sourceforge.plantuml.command.UmlDiagramFactory;
import net.sourceforge.plantuml.command.note.FactoryNoteCommand;
import net.sourceforge.plantuml.command.note.FactoryNoteOnEntityCommand;
import net.sourceforge.plantuml.command.note.FactoryNoteOnLinkCommand;
import net.sourceforge.plantuml.command.note.FactoryTipOnEntityCommand;
import net.sourceforge.plantuml.command.regex.RegexLeaf;
import net.sourceforge.plantuml.descdiagram.command.CommandNewpage;
import net.sourceforge.plantuml.objectdiagram.command.CommandCreateEntityObject;
import net.sourceforge.plantuml.objectdiagram.command.CommandCreateEntityObjectMultilines;
/**
 * Command factory for class diagrams: supplies the empty ClassDiagram and the
 * full list of commands understood by the class-diagram parser.
 */
public class ClassDiagramFactory extends UmlDiagramFactory {
@Override
public ClassDiagram createEmptyDiagram() {
return new ClassDiagram();
}
// Builds the parser command list. NOTE(review): commands appear to be tried
// in registration order — confirm before reordering entries.
@Override
protected List<Command> createCommands() {
final List<Command> cmds = new ArrayList<Command>();
cmds.add(new CommandFootboxIgnored());
addCommonCommands(cmds);
cmds.add(new CommandRankDir());
cmds.add(new CommandNewpage(this));
cmds.add(new CommandHideShowSpecificStereotype());
cmds.add(new CommandPage());
cmds.add(new CommandAddMethod());
cmds.add(new CommandCreateClass());
cmds.add(new CommandCreateEntityObject());
cmds.add(new CommandAllowMixing());
cmds.add(new CommandLayoutNewLine());
cmds.add(new CommandCreateElementFull2(Mode.NORMAL_KEYWORD));
cmds.add(new CommandCreateElementFull2(Mode.WITH_MIX_PREFIX));
final FactoryNoteCommand factoryNoteCommand = new FactoryNoteCommand();
cmds.add(factoryNoteCommand.createSingleLine());
cmds.add(new CommandPackage());
cmds.add(new CommandEndPackage());
cmds.add(new CommandPackageEmpty());
cmds.add(new CommandNamespace());
cmds.add(new CommandStereotype());
cmds.add(new CommandLinkClass(UmlDiagramType.CLASS));
cmds.add(new CommandLinkLollipop(UmlDiagramType.CLASS));
cmds.add(new CommandImport());
// Tip commands attach text to an entity matched as <code>::member</code>.
final FactoryTipOnEntityCommand factoryTipOnEntityCommand = new FactoryTipOnEntityCommand(new RegexLeaf(
"ENTITY", "(" + CommandCreateClass.CODE_NO_DOTDOT + "|[%g][^%g]+[%g])::([^%s]+)"));
cmds.add(factoryTipOnEntityCommand.createMultiLine(true));
cmds.add(factoryTipOnEntityCommand.createMultiLine(false));
// Note commands attach text to an entity matched by code or quoted name.
final FactoryNoteOnEntityCommand factoryNoteOnEntityCommand = new FactoryNoteOnEntityCommand(new RegexLeaf(
"ENTITY", "(" + CommandCreateClass.CODE + "|[%g][^%g]+[%g])"));
cmds.add(factoryNoteOnEntityCommand.createSingleLine());
cmds.add(new CommandUrl());
cmds.add(factoryNoteOnEntityCommand.createMultiLine(true));
cmds.add(factoryNoteOnEntityCommand.createMultiLine(false));
cmds.add(factoryNoteCommand.createMultiLine(false));
cmds.add(new CommandCreateClassMultilines());
cmds.add(new CommandCreateEntityObjectMultilines());
final FactoryNoteOnLinkCommand factoryNoteOnLinkCommand = new FactoryNoteOnLinkCommand();
cmds.add(factoryNoteOnLinkCommand.createSingleLine());
cmds.add(factoryNoteOnLinkCommand.createMultiLine(false));
cmds.add(new CommandDiamondAssociation());
cmds.add(new CommandHideShowSpecificClass());
cmds.add(new CommandNamespaceSeparator());
return cmds;
}
}
| apache-2.0 |
ingorichtsmeier/camunda-bpm-platform | engine/src/main/java/org/camunda/bpm/engine/impl/ProcessInstanceQueryImpl.java | 15531 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl;
import static org.camunda.bpm.engine.impl.util.EnsureUtil.ensureNotEmpty;
import static org.camunda.bpm.engine.impl.util.EnsureUtil.ensureNotNull;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import org.camunda.bpm.engine.ProcessEngineException;
import org.camunda.bpm.engine.impl.cfg.ProcessEngineConfigurationImpl;
import org.camunda.bpm.engine.impl.context.Context;
import org.camunda.bpm.engine.impl.interceptor.CommandContext;
import org.camunda.bpm.engine.impl.interceptor.CommandExecutor;
import org.camunda.bpm.engine.impl.persistence.entity.SuspensionState;
import org.camunda.bpm.engine.impl.util.ImmutablePair;
import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializers;
import org.camunda.bpm.engine.runtime.ProcessInstance;
import org.camunda.bpm.engine.runtime.ProcessInstanceQuery;
/**
* @author Tom Baeyens
* @author Joram Barrez
* @author Frederik Heremans
* @author Falko Menge
* @author Daniel Meyer
*/
public class ProcessInstanceQueryImpl extends AbstractVariableQueryImpl<ProcessInstanceQuery, ProcessInstance> implements ProcessInstanceQuery, Serializable {
private static final long serialVersionUID = 1L;
// Filter criteria. Each fluent setter below populates exactly one of these;
// unset (null/false) fields are ignored when the query executes.
protected String processInstanceId;
protected String businessKey;
protected String businessKeyLike;
protected String processDefinitionId;
protected Set<String> processInstanceIds;
protected String processDefinitionKey;
protected String[] processDefinitionKeys;
protected String[] processDefinitionKeyNotIn;
protected String deploymentId;
protected String superProcessInstanceId;
protected String subProcessInstanceId;
protected SuspensionState suspensionState;
protected boolean withIncident;
protected String incidentType;
protected String incidentId;
protected String incidentMessage;
protected String incidentMessageLike;
protected String caseInstanceId;
protected String superCaseInstanceId;
protected String subCaseInstanceId;
protected String[] activityIds;
protected boolean isRootProcessInstances;
protected boolean isLeafProcessInstances;
protected boolean isTenantIdSet = false;
protected String[] tenantIds;
protected boolean isProcessDefinitionWithoutTenantId = false;
// or query /////////////////////////////
// queries.get(0) is always this instance (the main query); every or() call
// appends a new branch query to the same shared list.
protected List<ProcessInstanceQueryImpl> queries = new ArrayList<>(Arrays.asList(this));
protected boolean isOrQueryActive = false;
public ProcessInstanceQueryImpl() {
}
public ProcessInstanceQueryImpl(CommandExecutor commandExecutor) {
super(commandExecutor);
}
public ProcessInstanceQueryImpl processInstanceId(String processInstanceId) {
ensureNotNull("Process instance id", processInstanceId);
this.processInstanceId = processInstanceId;
return this;
}
public ProcessInstanceQuery processInstanceIds(Set<String> processInstanceIds) {
ensureNotEmpty("Set of process instance ids", processInstanceIds);
this.processInstanceIds = processInstanceIds;
return this;
}
public ProcessInstanceQuery processInstanceBusinessKey(String businessKey) {
ensureNotNull("Business key", businessKey);
this.businessKey = businessKey;
return this;
}
// Overload that also restricts to a process definition key.
public ProcessInstanceQuery processInstanceBusinessKey(String businessKey, String processDefinitionKey) {
ensureNotNull("Business key", businessKey);
this.businessKey = businessKey;
this.processDefinitionKey = processDefinitionKey;
return this;
}
public ProcessInstanceQuery processInstanceBusinessKeyLike(String businessKeyLike) {
this.businessKeyLike = businessKeyLike;
return this;
}
public ProcessInstanceQueryImpl processDefinitionId(String processDefinitionId) {
ensureNotNull("Process definition id", processDefinitionId);
this.processDefinitionId = processDefinitionId;
return this;
}
public ProcessInstanceQueryImpl processDefinitionKey(String processDefinitionKey) {
ensureNotNull("Process definition key", processDefinitionKey);
this.processDefinitionKey = processDefinitionKey;
return this;
}
public ProcessInstanceQuery processDefinitionKeyIn(String... processDefinitionKeys) {
ensureNotNull("processDefinitionKeys", (Object[]) processDefinitionKeys);
this.processDefinitionKeys = processDefinitionKeys;
return this;
}
public ProcessInstanceQuery processDefinitionKeyNotIn(String... processDefinitionKeys) {
ensureNotNull("processDefinitionKeyNotIn", (Object[]) processDefinitionKeys);
this.processDefinitionKeyNotIn = processDefinitionKeys;
return this;
}
public ProcessInstanceQuery deploymentId(String deploymentId) {
ensureNotNull("Deployment id", deploymentId);
this.deploymentId = deploymentId;
return this;
}
// Mutually exclusive with rootProcessInstances(); see that method's guard.
public ProcessInstanceQuery superProcessInstanceId(String superProcessInstanceId) {
if (isRootProcessInstances) {
throw new ProcessEngineException("Invalid query usage: cannot set both rootProcessInstances and superProcessInstanceId");
}
this.superProcessInstanceId = superProcessInstanceId;
return this;
}
public ProcessInstanceQuery subProcessInstanceId(String subProcessInstanceId) {
this.subProcessInstanceId = subProcessInstanceId;
return this;
}
public ProcessInstanceQuery caseInstanceId(String caseInstanceId) {
ensureNotNull("caseInstanceId", caseInstanceId);
this.caseInstanceId = caseInstanceId;
return this;
}
public ProcessInstanceQuery superCaseInstanceId(String superCaseInstanceId) {
ensureNotNull("superCaseInstanceId", superCaseInstanceId);
this.superCaseInstanceId = superCaseInstanceId;
return this;
}
public ProcessInstanceQuery subCaseInstanceId(String subCaseInstanceId) {
ensureNotNull("subCaseInstanceId", subCaseInstanceId);
this.subCaseInstanceId = subCaseInstanceId;
return this;
}
// Ordering clauses. Ordering is rejected inside an 'or' branch because it is
// only meaningful on the top-level query.
public ProcessInstanceQuery orderByProcessInstanceId() {
if (isOrQueryActive) {
throw new ProcessEngineException("Invalid query usage: cannot set orderByProcessInstanceId() within 'or' query");
}
orderBy(ProcessInstanceQueryProperty.PROCESS_INSTANCE_ID);
return this;
}
public ProcessInstanceQuery orderByProcessDefinitionId() {
if (isOrQueryActive) {
throw new ProcessEngineException("Invalid query usage: cannot set orderByProcessDefinitionId() within 'or' query");
}
orderBy(new QueryOrderingProperty(QueryOrderingProperty.RELATION_PROCESS_DEFINITION,
ProcessInstanceQueryProperty.PROCESS_DEFINITION_ID));
return this;
}
public ProcessInstanceQuery orderByProcessDefinitionKey() {
if (isOrQueryActive) {
throw new ProcessEngineException("Invalid query usage: cannot set orderByProcessDefinitionKey() within 'or' query");
}
orderBy(new QueryOrderingProperty(QueryOrderingProperty.RELATION_PROCESS_DEFINITION,
ProcessInstanceQueryProperty.PROCESS_DEFINITION_KEY));
return this;
}
public ProcessInstanceQuery orderByTenantId() {
if (isOrQueryActive) {
throw new ProcessEngineException("Invalid query usage: cannot set orderByTenantId() within 'or' query");
}
orderBy(ProcessInstanceQueryProperty.TENANT_ID);
return this;
}
public ProcessInstanceQuery orderByBusinessKey() {
if (isOrQueryActive) {
throw new ProcessEngineException("Invalid query usage: cannot set orderByBusinessKey() within 'or' query");
}
orderBy(ProcessInstanceQueryProperty.BUSINESS_KEY);
return this;
}
public ProcessInstanceQuery active() {
this.suspensionState = SuspensionState.ACTIVE;
return this;
}
public ProcessInstanceQuery suspended() {
this.suspensionState = SuspensionState.SUSPENDED;
return this;
}
public ProcessInstanceQuery withIncident() {
this.withIncident = true;
return this;
}
public ProcessInstanceQuery incidentType(String incidentType) {
ensureNotNull("incident type", incidentType);
this.incidentType = incidentType;
return this;
}
public ProcessInstanceQuery incidentId(String incidentId) {
ensureNotNull("incident id", incidentId);
this.incidentId = incidentId;
return this;
}
public ProcessInstanceQuery incidentMessage(String incidentMessage) {
ensureNotNull("incident message", incidentMessage);
this.incidentMessage = incidentMessage;
return this;
}
public ProcessInstanceQuery incidentMessageLike(String incidentMessageLike) {
ensureNotNull("incident messageLike", incidentMessageLike);
this.incidentMessageLike = incidentMessageLike;
return this;
}
public ProcessInstanceQuery tenantIdIn(String... tenantIds) {
ensureNotNull("tenantIds", (Object[]) tenantIds);
this.tenantIds = tenantIds;
isTenantIdSet = true;
return this;
}
// Filters for instances without any tenant (tenantIds stays null on purpose).
public ProcessInstanceQuery withoutTenantId() {
tenantIds = null;
isTenantIdSet = true;
return this;
}
public ProcessInstanceQuery activityIdIn(String... activityIds) {
ensureNotNull("activity ids", (Object[]) activityIds);
this.activityIds = activityIds;
return this;
}
// Mutually exclusive with superProcessInstanceId(); see that method's guard.
public ProcessInstanceQuery rootProcessInstances() {
if (superProcessInstanceId != null) {
throw new ProcessEngineException("Invalid query usage: cannot set both rootProcessInstances and superProcessInstanceId");
}
isRootProcessInstances = true;
return this;
}
// Mutually exclusive with subProcessInstanceId().
public ProcessInstanceQuery leafProcessInstances() {
if(subProcessInstanceId != null) {
throw new ProcessEngineException("Invalid query usage: cannot set both leafProcessInstances and subProcessInstanceId");
}
isLeafProcessInstances = true;
return this;
}
public ProcessInstanceQuery processDefinitionWithoutTenantId() {
isProcessDefinitionWithoutTenantId = true;
return this;
}
//results /////////////////////////////////////////////////////////////////
@Override
protected void checkQueryOk() {
ensureVariablesInitialized();
super.checkQueryOk();
}
@Override
public long executeCount(CommandContext commandContext) {
checkQueryOk();
return commandContext
.getExecutionManager()
.findProcessInstanceCountByQueryCriteria(this);
}
@Override
public List<ProcessInstance> executeList(CommandContext commandContext, Page page) {
checkQueryOk();
return commandContext
.getExecutionManager()
.findProcessInstancesByQueryCriteria(this, page);
}
public List<String> executeIdsList(CommandContext commandContext) {
checkQueryOk();
return commandContext
.getExecutionManager()
.findProcessInstancesIdsByQueryCriteria(this);
}
@Override
public List<ImmutablePair<String, String>> executeDeploymentIdMappingsList(CommandContext commandContext) {
checkQueryOk();
return commandContext
.getExecutionManager()
.findDeploymentIdMappingsByQueryCriteria(this);
}
// Initializes variable values for the main query and every 'or' branch so
// the serializers/db type are set before SQL generation.
@Override
protected void ensureVariablesInitialized() {
super.ensureVariablesInitialized();
if (!queries.isEmpty()) {
ProcessEngineConfigurationImpl processEngineConfiguration = Context.getProcessEngineConfiguration();
VariableSerializers variableSerializers = processEngineConfiguration.getVariableSerializers();
String dbType = processEngineConfiguration.getDatabaseType();
for (ProcessInstanceQueryImpl orQuery: queries) {
for (QueryVariableValue var : orQuery.queryVariableValues) {
var.initialize(variableSerializers, dbType);
}
}
}
}
//getters /////////////////////////////////////////////////////////////////
public String getProcessInstanceId() {
return processInstanceId;
}
public Set<String> getProcessInstanceIds() {
return processInstanceIds;
}
public List<ProcessInstanceQueryImpl> getQueries() {
return queries;
}
public void addOrQuery(ProcessInstanceQueryImpl orQuery) {
orQuery.isOrQueryActive = true;
this.queries.add(orQuery);
}
public void setOrQueryActive() {
isOrQueryActive = true;
}
public boolean isOrQueryActive() {
return isOrQueryActive;
}
public String[] getActivityIds() {
return activityIds;
}
public String getBusinessKey() {
return businessKey;
}
public String getBusinessKeyLike() {
return businessKeyLike;
}
public String getProcessDefinitionId() {
return processDefinitionId;
}
public String getProcessDefinitionKey() {
return processDefinitionKey;
}
public String[] getProcessDefinitionKeys() {
return processDefinitionKeys;
}
public String[] getProcessDefinitionKeyNotIn() {
return processDefinitionKeyNotIn;
}
public String getDeploymentId() {
return deploymentId;
}
public String getSuperProcessInstanceId() {
return superProcessInstanceId;
}
public String getSubProcessInstanceId() {
return subProcessInstanceId;
}
public SuspensionState getSuspensionState() {
return suspensionState;
}
public void setSuspensionState(SuspensionState suspensionState) {
this.suspensionState = suspensionState;
}
public boolean isWithIncident() {
return withIncident;
}
public String getIncidentId() {
return incidentId;
}
public String getIncidentType() {
return incidentType;
}
public String getIncidentMessage() {
return incidentMessage;
}
public String getIncidentMessageLike() {
return incidentMessageLike;
}
public String getCaseInstanceId() {
return caseInstanceId;
}
public String getSuperCaseInstanceId() {
return superCaseInstanceId;
}
public String getSubCaseInstanceId() {
return subCaseInstanceId;
}
public boolean isTenantIdSet() {
return isTenantIdSet;
}
public boolean isRootProcessInstances() {
return isRootProcessInstances;
}
public boolean isProcessDefinitionWithoutTenantId() {
return isProcessDefinitionWithoutTenantId;
}
public boolean isLeafProcessInstances() {
return isLeafProcessInstances;
}
public String[] getTenantIds() {
return tenantIds;
}
// Starts a new 'or' branch. Only allowed on the main query (queries.get(0));
// the returned branch shares the same queries list.
@Override
public ProcessInstanceQuery or() {
if (this != queries.get(0)) {
throw new ProcessEngineException("Invalid query usage: cannot set or() within 'or' query");
}
ProcessInstanceQueryImpl orQuery = new ProcessInstanceQueryImpl();
orQuery.isOrQueryActive = true;
orQuery.queries = queries;
queries.add(orQuery);
return orQuery;
}
// Ends the current 'or' branch and returns control to the main query.
@Override
public ProcessInstanceQuery endOr() {
if (!queries.isEmpty() && this != queries.get(queries.size()-1)) {
throw new ProcessEngineException("Invalid query usage: cannot set endOr() before or()");
}
return queries.get(0);
}
} | apache-2.0 |
liam-kelly/CloudBurst | includes/views/settings.php | 245 | <div id="set">
<div id="sidebar">
<?php
//Include the settings menu
require_once(ABSPATH.'includes/views/settings_menus/menu.php')
?>
</div>
<p>This is where settings will go.</p>
</div> | apache-2.0 |
bitgirder/bitgirder-main | mingle-service/go/lib/mingle/service/errorf.go | 262 | package service
import (
"fmt"
"errors"
)
// libError returns an error whose message is text prefixed with the
// "mingle/service: " package tag.
func libError(text string) error {
	const prefix = "mingle/service: "
	return errors.New(prefix + text)
}
// libErrorf formats tmpl with argv (fmt.Errorf semantics, including %w
// wrapping) and prefixes the resulting message with the "mingle/service: "
// package tag.
func libErrorf(format string, args ...interface{}) error {
	return fmt.Errorf("mingle/service: "+format, args...)
}
| apache-2.0 |
mikeng13/slcCampNYC_teamMnM_mobile | Models/Calendar.cs | 230 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace HelloSLC.Models
{
    /// <summary>
    /// Model for a calendar entry; currently only carries a display title.
    /// </summary>
    public class Calendar
    {
        /// <summary>Display title of the calendar entry.</summary>
        public string Title { get; set; }
    }
}
| apache-2.0 |
cwx521/Husky | src/Husky.Principal.Users/Data/Entities/UserGroup.cs | 231 | using System.ComponentModel.DataAnnotations;
namespace Husky.Principal.Users.Data
{
	/// <summary>
	/// Entity describing a user group.
	/// </summary>
	public class UserGroup
	{
		/// <summary>Primary key.</summary>
		public int Id { get; set; }
		/// <summary>Group display name; required, at most 50 characters.</summary>
		[StringLength(50), Required]
		public string GroupName { get; set; } = null!;
	}
}
| apache-2.0 |
datathings/greycat | plugins/ml/src/test/java/greycatMLTest/GPSIndex.java | 4837 | /**
* Copyright 2017-2019 The GreyCat Authors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package greycatMLTest;
import greycat.Callback;
import greycat.Graph;
import greycat.GraphBuilder;
import greycat.Node;
import greycat.internal.custom.KDTreeNode;
import greycat.struct.TreeResult;
import org.junit.Test;
import java.util.Random;
/**
 * Exercise a KD-tree index over randomly generated GPS coordinates:
 * bulk insert, k-nearest-neighbour queries, and an area (range) query,
 * with rough wall-clock timings printed for each phase.
 */
public class GPSIndex {
@Test
public void testgps() {
final Graph graph = new GraphBuilder()
.withMemorySize(1000000)
.build();
graph.connect(new Callback<Boolean>() {
@Override
public void on(Boolean result) {
//Can be changed to NDTree as well
KDTreeNode kdTree = (KDTreeNode) graph.newTypedNode(0, 0, KDTreeNode.NAME);
int dim = 2; //0 for lat and 1 for lng
double[] precisions = new double[dim];
double[] boundMin = new double[dim];
double[] boundMax = new double[dim];
// Index resolution and the valid GPS bounds (lat in [-90,90], lng in [-180,180]).
precisions[0] = 0.00000001;
precisions[1] = 0.00000001;
boundMin[0] = -90;
boundMin[1] = -180;
boundMax[0] = 90;
boundMax[1] = 180;
kdTree.setResolution(precisions);
kdTree.setMinBound(boundMin);
kdTree.setMaxBound(boundMax);
// Fixed seed keeps the generated points reproducible across runs.
Random random = new Random();
random.setSeed(125362l);
int inserts = 10000;
int test = 100;
int nsearch = 4;
Node[] nodes = new Node[inserts];
double[][] keys = new double[inserts][];
double[][] keysTest = new double[test][];
for (int i = 0; i < inserts; i++) {
double[] key = new double[dim];
//generate random gps points between -90,-180 and +90,+180
key[0] = random.nextDouble() * 180 - 90;
key[1] = random.nextDouble() * 360 - 180;
keys[i] = key;
nodes[i] = graph.newNode(0, 0);
}
for (int i = 0; i < test; i++) {
double[] key = new double[dim];
//generate test gps points between -90,-180 and +90,+180
key[0] = random.nextDouble() * 180 - 90;
key[1] = random.nextDouble() * 360 - 180;
keysTest[i] = key;
}
// Phase 1: bulk insert, timed.
long ts = System.currentTimeMillis();
for (int i = 0; i < inserts; i++) {
kdTree.insert(keys[i], nodes[i].id());
}
long te = System.currentTimeMillis() - ts;
System.out.println("KDTree insert: " + te + " ms");
// Phase 2: k-nearest-neighbour lookups for each test point, timed.
long[][] tempkdtree = new long[test][nsearch];
ts = System.currentTimeMillis();
for (int i = 0; i < test; i++) {
TreeResult res = kdTree.queryAround(keysTest[i], nsearch);
for (int j = 0; j < nsearch; j++) {
tempkdtree[i][j] = res.value(j);
}
res.free();
}
te = System.currentTimeMillis() - ts;
System.out.println("KDTree get all: " + te + " ms");
System.out.println("");
System.out.println("KDTree size: " + kdTree.size());
System.out.println("");
// Phase 3: rectangular area query over [-20,20] x [-20,20], timed.
double[] mins = new double[dim];
double[] maxs = new double[dim];
mins[0] = -20;
mins[1] = -20;
maxs[0] = 20;
maxs[1] = 20;
ts = System.currentTimeMillis();
TreeResult trangeKD = kdTree.queryArea(mins, maxs);
te = System.currentTimeMillis() - ts;
// System.out.println("KDTree range: " + te + " ms");
// System.out.println("found: " + trangeKD.size() + " result in this area");
// NOTE(review): trangeKD is only used by the commented-out prints and is
// never freed — no assertions are made in this benchmark-style test.
graph.disconnect(new Callback<Boolean>() {
@Override
public void on(Boolean result) {
}
});
}
});
}
}
| apache-2.0 |
NationalSecurityAgency/ghidra | Ghidra/Features/Base/src/test.slow/java/ghidra/app/plugin/core/symboltree/SymbolTreePlugin1Test.java | 31370 | /* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.symboltree;
import static org.junit.Assert.*;
import java.awt.Container;
import java.awt.datatransfer.Transferable;
import java.awt.dnd.DnDConstants;
import java.lang.reflect.InvocationTargetException;
import java.util.*;
import javax.swing.*;
import javax.swing.tree.DefaultTreeCellEditor;
import javax.swing.tree.TreePath;
import org.junit.*;
import docking.ActionContext;
import docking.action.DockingActionIf;
import docking.widgets.OptionDialog;
import docking.widgets.tree.GTree;
import docking.widgets.tree.GTreeNode;
import docking.widgets.tree.support.GTreeDragNDropHandler;
import docking.widgets.tree.support.GTreeNodeTransferable;
import generic.test.AbstractGenericTest;
import ghidra.app.plugin.core.codebrowser.CodeBrowserPlugin;
import ghidra.app.plugin.core.marker.MarkerManagerPlugin;
import ghidra.app.plugin.core.programtree.ProgramTreePlugin;
import ghidra.app.plugin.core.symboltree.nodes.SymbolCategoryNode;
import ghidra.app.plugin.core.symboltree.nodes.SymbolNode;
import ghidra.app.services.ProgramManager;
import ghidra.framework.plugintool.PluginTool;
import ghidra.program.model.address.Address;
import ghidra.program.model.address.AddressSet;
import ghidra.program.model.listing.Program;
import ghidra.program.model.symbol.*;
import ghidra.test.AbstractGhidraHeadedIntegrationTest;
import ghidra.test.TestEnv;
/**
* Tests for the symbol tree plugin.
*/
public class SymbolTreePlugin1Test extends AbstractGhidraHeadedIntegrationTest {
	// Test environment and plugins wired up in setUp().
	private TestEnv env;
	private PluginTool tool;
	private Program program;
	private SymbolTreePlugin plugin;
	private DockingActionIf symTreeAction;
	private CodeBrowserPlugin cbPlugin;
	// Symbol tree state captured by showSymbolTree().
	private GTreeNode rootNode;
	private GTreeNode namespacesNode;
	private GTree tree;
	private Namespace globalNamespace;
	private int index;
	// Symbol tree actions under test, resolved by getActions().
	private DockingActionIf renameAction;
	private DockingActionIf cutAction;
	private DockingActionIf pasteAction;
	private DockingActionIf deleteAction;
	private DockingActionIf selectionAction;
	private DockingActionIf createNamespaceAction;
	private DockingActionIf createClassAction;
	private DockingActionIf goToToggleAction;
	private DockingActionIf goToExtLocAction;
	private DockingActionIf createLibraryAction;
	private DockingActionIf setExternalProgramAction;
	private DockingActionIf createExternalLocationAction;
	private DockingActionIf editExternalLocationAction;
	private SymbolTreeTestUtils util;
	// Creates the tool with the plugins the symbol tree depends on, opens the
	// test program, resolves the actions under test, and shows the tool.
	@Before
	public void setUp() throws Exception {
		env = new TestEnv();
		tool = env.getTool();
		tool.addPlugin(ProgramTreePlugin.class.getName());
		tool.addPlugin(CodeBrowserPlugin.class.getName());
		tool.addPlugin(MarkerManagerPlugin.class.getName());
		tool.addPlugin(SymbolTreePlugin.class.getName());
		plugin = env.getPlugin(SymbolTreePlugin.class);
		symTreeAction = getAction(plugin, "Symbol Tree");
		cbPlugin = env.getPlugin(CodeBrowserPlugin.class);
		util = new SymbolTreeTestUtils(plugin);
		program = util.getProgram();
		globalNamespace = program.getGlobalNamespace();
		getActions();
		env.showTool();
	}
	// Releases the program and the test environment after each test.
	@After
	public void tearDown() throws Exception {
		closeProgram();
		env.dispose();
	}
	// Verifies that the Functions category auto-closes once the number of
	// children exceeds MAX_NODES_BEFORE_CLOSING, and regroups the new symbols
	// under an organization node on re-expand.
	@Test
	public void testCloseCategoryIfOrgnodesGetOutOfBalance() throws Exception {
		showSymbolTree();
		GTreeNode functionsNode = rootNode.getChild("Functions");
		assertFalse(functionsNode.isLoaded());
		functionsNode.expand();
		waitForTree(tree);
		assertTrue(functionsNode.isLoaded());
		// add lots of nodes to cause functionsNode to close
		addFunctions(SymbolCategoryNode.MAX_NODES_BEFORE_CLOSING);
		waitForTree(tree);
		assertFalse(functionsNode.isLoaded());
		functionsNode.expand();
		waitForTree(tree);
		// should have 4 nodes, one for each of the original 3 functions and a org node with
		// all new "FUNCTION*" named functions
		assertEquals(4, functionsNode.getChildCount());
	}
	// Creates 'count' one-address functions named FUNCTION_<i> starting at
	// 0x1002000, inside a single program transaction.
	private void addFunctions(int count) throws Exception {
		tx(program, () -> {
			for (int i = 0; i < count; i++) {
				String name = "FUNCTION_" + i;
				Address address = util.addr(0x1002000 + i);
				AddressSet body = new AddressSet(address);
				program.getListing().createFunction(name, address, body, SourceType.USER_DEFINED);
			}
		});
	}
	// Verifies the symbol tree shows the six expected top-level categories in
	// their fixed order.
	@Test
	public void testShowDisplay() throws Exception {
		showSymbolTree();
		assertEquals(6, rootNode.getChildCount());
		GTreeNode node = rootNode.getChild(0);
		assertEquals("Imports", node.getName());
		node = rootNode.getChild(1);
		assertEquals("Exports", node.getName());
		node = rootNode.getChild(2);
		assertEquals("Functions", node.getName());
		node = rootNode.getChild(3);
		assertEquals("Labels", node.getName());
		node = rootNode.getChild(4);
		assertEquals("Classes", node.getName());
		node = rootNode.getChild(5);
		assertEquals("Namespaces", node.getName());
	}
	// Verifies the Imports category mirrors the program's import symbols and
	// that selecting an external symbol enables the go-to-external action.
	@Test
	public void testExternals() throws Exception {
		showSymbolTree();
		List<?> list =
			getChildren(globalNamespace, SymbolCategory.IMPORTS_CATEGORY.getSymbolType());
		GTreeNode extNode = rootNode.getChild(0);
		util.expandNode(extNode);
		assertEquals(list.size(), extNode.getChildCount());
		checkGTreeNodes(list, extNode);
		GTreeNode node = extNode.getChild(0);
		util.expandNode(node);
		GTreeNode fNode = node.getChild(0);
		util.selectNode(fNode);
		assertTrue(goToExtLocAction.isEnabledForContext(util.getSymbolTreeContext()));
	}
	// Selects the external symbol "IsTextUnicode", sets its library's external
	// path, re-selects after the resulting tree rebuild, then triggers the
	// go-to-external action and cancels the dialog it raises.
	@Test
	public void testGoToExternal() throws Exception {
		showSymbolTree();
		GTreeNode extNode = rootNode.getChild(0);
		util.expandNode(extNode);
		GTreeNode node = extNode.getChild(0);
		util.expandNode(node);
		GTreeNode fNode = node.getChild(0);
		util.selectNode(fNode);
		Symbol extSym = ((SymbolNode) fNode).getSymbol();
		assertEquals(SymbolType.LABEL, extSym.getSymbolType());
		assertTrue(extSym.isExternal());
		assertNotNull(extSym);
		assertEquals("IsTextUnicode", extSym.getName());
		ExternalLocation extLoc = (ExternalLocation) extSym.getObject();
		// Mutating the external path requires a program transaction.
		int transactionID = program.startTransaction("test");
		try {
			program.getExternalManager().setExternalPath(extLoc.getLibraryName(), null, true);
		}
		finally {
			program.endTransaction(transactionID, true);
		}
		flushAndWaitForTree();
		cbPlugin.updateNow();
		// reselect - setting path rebuilt tree
		extNode = rootNode.getChild(0);
		node = extNode.getChild(0);
		util.expandNode(node);
		node = extNode.getChild(0);
		fNode = node.getChild(0);
		assertEquals("IsTextUnicode", fNode.getName());
		util.selectNode(fNode);
		TreePath selectionPath = tree.getSelectionPath();
		assertNotNull(selectionPath);
		Object selectedObject = selectionPath.getLastPathComponent();
		assertEquals(fNode, selectedObject);
		waitForPostedSwingRunnables();
		performAction(goToExtLocAction, util.getSymbolTreeContext(), false);
		waitForPostedSwingRunnables();
		// The action pops an OptionDialog; dismiss it to end the test cleanly.
		OptionDialog d = waitForDialogComponent(tool.getToolFrame(), OptionDialog.class, 2000);
		assertNotNull(d);
		pressButtonByText(d, "Cancel");
	}
	// Verifies the Functions category mirrors the program's function symbols.
	@Test
	public void testFunctions() throws Exception {
		showSymbolTree();
		List<?> list =
			getChildren(globalNamespace, SymbolCategory.FUNCTION_CATEGORY.getSymbolType());
		GTreeNode fNode = rootNode.getChild(2);
		util.expandNode(fNode);
		assertEquals(list.size(), fNode.getChildCount());
		checkGTreeNodes(list, fNode);
	}
	// Verifies the Labels category (including its nested children) mirrors the
	// program's label symbols.
	@Test
	public void testLabels() throws Exception {
		showSymbolTree();
		List<?> list = getChildren(globalNamespace, SymbolCategory.LABEL_CATEGORY.getSymbolType());
		GTreeNode labelNode = rootNode.getChild(3);
		util.expandNode(labelNode);
		for (int i = 0; i < labelNode.getChildCount(); i++) {
			GTreeNode node = labelNode.getChild(i);
			util.expandNode(node);
		}
		checkLabelNodes(list, labelNode);
	}
	// With the (invisible) root node selected, only Create Library should be
	// enabled; all other symbol tree actions must be disabled.
	@Test
	public void testGlobalSymCategoryActionEnablement() throws Exception {
		// select the root node (BTW - node is not visible) only Create Library should show up
		showSymbolTree();
		util.selectNode(rootNode);
		ActionContext context = util.getSymbolTreeContext();
		assertTrue(createLibraryAction.isEnabledForContext(context));
		assertTrue(!createClassAction.isEnabledForContext(context));
		assertTrue(!createNamespaceAction.isEnabledForContext(context));
		assertTrue(!renameAction.isEnabledForContext(context));
		assertTrue(!cutAction.isEnabledForContext(context));
		assertTrue(!pasteAction.isEnabledForContext(context));
		assertTrue(!deleteAction.isEnabledForContext(context));
		assertTrue(!selectionAction.isEnabledForContext(context));
		assertTrue(!goToExtLocAction.isEnabledForContext(context));
		assertTrue(!goToExtLocAction.isEnabledForContext(context));
	}
	@Test
	public void testPasteActionEnabled() throws Exception {
		showSymbolTree();
		// cut label from a function
		// select Global; paste should be enabled
		GTreeNode fNode = rootNode.getChild(2); // Functions category
		util.expandNode(fNode);
		GTreeNode gNode = fNode.getChild(1);
		util.expandNode(gNode);
		GTreeNode node = gNode.getChild(9); // a symbol inside the function
		util.selectNode(node);
		performAction(cutAction, util.getSymbolTreeContext(), true);
		util.selectNode(rootNode);
		assertTrue(pasteAction.isEnabledForContext(util.getSymbolTreeContext()));
		// move a function to a namespace
		// cut a function; select global; paste should be enabled
		GTreeNode nsParentNode = rootNode.getChild(5); // Namespaces category
		GTreeNode nsNode = util.createObject(nsParentNode, "MyNamespace", createNamespaceAction);
		doDrag(nsNode, gNode, DnDConstants.ACTION_MOVE);
		util.waitForTree();
		flushAndWaitForTree();
		// re-acquire nodes: the drag-move rebuilt the tree
		nsParentNode = rootNode.getChild(5);
		nsNode = nsParentNode.getChild(0);
		util.expandNode(nsNode);
		util.waitForTree();
		waitForPostedSwingRunnables();
		util.waitForTree();
		gNode = nsNode.getChild(0);
		// NOTE(review): gNode is re-fetched unconditionally below, so this
		// retry block looks redundant -- presumably a timing workaround;
		// confirm before removing.
		if (gNode == null) {
			if (tree.isExpanded(nsNode.getTreePath())) {
				gNode = nsNode.getChild(0);
			}
		}
		assertNotNull(gNode);
		gNode = nsNode.getChild(0);
		util.selectNode(gNode);
		assertTrue(cutAction.isEnabledForContext(util.getSymbolTreeContext()));
		performAction(cutAction, util.getSymbolTreeContext(), true);
		// select the root node
		util.selectNode(rootNode);
		assertTrue(pasteAction.isEnabledForContext(util.getSymbolTreeContext()));
	}
	@Test
	public void testPasteActionEnabled2() throws Exception {
		showSymbolTree();
		// cut label from Global
		// select a function; paste should be enabled because it's address is within the function
		SymbolTable symTable = program.getSymbolTable();
		// create label within body of ghidra function
		int transactionID = program.startTransaction("test");
		symTable.createLabel(util.addr(0x01002d04), "fred", SourceType.USER_DEFINED);
		program.endTransaction(transactionID, true);
		flushAndWaitForTree();
		GTreeNode fNode = rootNode.getChild(2); // Functions category
		util.expandNode(fNode);
		GTreeNode labelsNode = rootNode.getChild("Labels");
		GTreeNode namespaceNode = rootNode.getChild("Namespaces");
		util.selectNode(namespaceNode);
		performAction(createNamespaceAction, util.getSymbolTreeContext(), true);
		util.waitForTree();
		tree.stopEditing(); // commit the new namespace's in-tree name editor
		GTreeNode fredNode = labelsNode.getChild("fred");
		util.selectNode(fredNode);
		waitForPostedSwingRunnables();
		assertTrue(cutAction.isEnabledForContext(util.getSymbolTreeContext()));
		performAction(cutAction, util.getSymbolTreeContext(), true);
		// paste onto the newly created namespace is allowed
		GTreeNode gNode = namespaceNode.getChild(0);
		util.selectNode(gNode);
		assertTrue(pasteAction.isEnabledForContext(util.getSymbolTreeContext()));
		// NOTE(review): paste is expected to be disabled on this function node
		// -- presumably 0x01002d04 lies outside its body; confirm.
		GTreeNode dNode = fNode.getChild(0);
		util.selectNode(dNode);
		assertTrue(!pasteAction.isEnabledForContext(util.getSymbolTreeContext()));
	}
	@Test
	public void testPasteActionEnabled3() throws Exception {
		showSymbolTree();
		// move function to other namespace
		// select this function; paste should be enabled for Functions node
		GTreeNode functionsNode = rootNode.getChild(2); // Functions category
		util.expandNode(functionsNode);
		String doStuffNodeName = "doStuff";
		GTreeNode doStuffNode = functionsNode.getChild(doStuffNodeName);
		util.expandNode(doStuffNode);
		GTreeNode newNamespaceNode =
			util.createObject(namespacesNode, "MyNamespace", createNamespaceAction);
		doDrag(newNamespaceNode, doStuffNode, DnDConstants.ACTION_MOVE);
		newNamespaceNode = namespacesNode.getChild("MyNamespace"); // re-fetch after the move
		flushAndWaitForTree();
		GTreeNode draggedDoStuffNode = newNamespaceNode.getChild(doStuffNodeName);
		util.selectNode(draggedDoStuffNode);
		// clear clipboard
		util.clearClipboard();
		assertTrue(cutAction.isEnabledForContext(util.getSymbolTreeContext()));
		performAction(cutAction, util.getSymbolTreeContext(), true);
		// make sure action executed
		assertNotNull(util.getClipboardContents());
		util.waitForTree();
		waitForPostedSwingRunnables();
		util.selectNode(functionsNode);
		util.waitForTree();
		waitForPostedSwingRunnables();
		// verify node selected
		assertEquals("Node not selected.", functionsNode, util.getSelectedNode());
		assertTrue(pasteAction.isEnabledForContext(util.getSymbolTreeContext()));
	}
@Test
public void testSymCategoryActionEnablement() throws Exception {
// select the external symbol category;
// no actions should be applicable
showSymbolTree();
GTreeNode extNode = rootNode.getChild(0);
util.selectNode(extNode);
ActionContext context = util.getSymbolTreeContext();
boolean createLibraryIsEnabled = createLibraryAction.isEnabledForContext(context);
if (extNode.getName().equals("Imports")) {
assertTrue(createLibraryIsEnabled);
}
else {
assertFalse(createLibraryIsEnabled);
}
assertTrue(!createClassAction.isEnabledForContext(context));
assertTrue(!createNamespaceAction.isEnabledForContext(context));
assertTrue(!renameAction.isEnabledForContext(context));
assertTrue(!renameAction.isEnabledForContext(context));
assertTrue(!cutAction.isEnabledForContext(context));
assertTrue(!cutAction.isEnabledForContext(context));
assertTrue(!pasteAction.isEnabledForContext(context));
assertTrue(!pasteAction.isEnabledForContext(context));
assertTrue(!deleteAction.isEnabledForContext(context));
assertTrue(!deleteAction.isEnabledForContext(context));
assertTrue(!selectionAction.isEnabledForContext(context));
assertTrue(!selectionAction.isEnabledForContext(context));
GTreeNode lNode = rootNode.getChild(1);
util.selectNode(lNode);
context = util.getSymbolTreeContext();
assertTrue(!createLibraryAction.isEnabledForContext(context));
assertTrue(!createClassAction.isEnabledForContext(context));
assertTrue(!createNamespaceAction.isEnabledForContext(context));
assertTrue(!renameAction.isEnabledForContext(context));
assertTrue(!renameAction.isEnabledForContext(context));
assertTrue(!cutAction.isEnabledForContext(context));
assertTrue(!cutAction.isEnabledForContext(context));
assertTrue(!pasteAction.isEnabledForContext(context));
assertTrue(!pasteAction.isEnabledForContext(context));
assertTrue(!deleteAction.isEnabledForContext(context));
assertTrue(!deleteAction.isEnabledForContext(context));
assertTrue(!selectionAction.isEnabledForContext(context));
assertTrue(!selectionAction.isEnabledForContext(context));
}
@Test
public void testParameterActionEnablement() throws Exception {
showSymbolTree();
GTreeNode fNode = rootNode.getChild(2);
util.expandNode(fNode);
GTreeNode gNode = fNode.getChild(1);
util.expandNode(gNode);
GTreeNode pNode = gNode.getChild(0);
util.selectNode(pNode);
ActionContext context = util.getSymbolTreeContext();
assertTrue(!cutAction.isEnabledForContext(context));
assertTrue(!pasteAction.isEnabledForContext(context));
assertTrue(renameAction.isEnabledForContext(context));
assertTrue(renameAction.isEnabledForContext(context));
assertTrue(selectionAction.isEnabledForContext(context));
assertTrue(deleteAction.isEnabledForContext(context));
}
@Test
public void testFunctionActionEnablement() throws Exception {
showSymbolTree();
GTreeNode fNode = rootNode.getChild(2);
util.expandNode(fNode);
GTreeNode gNode = fNode.getChild(1);
util.expandNode(gNode);
util.selectNode(gNode);
ActionContext context = util.getSymbolTreeContext();
assertTrue(cutAction.isEnabledForContext(context));
assertTrue(!pasteAction.isEnabledForContext(context));
assertTrue(renameAction.isEnabledForContext(context));
assertTrue(renameAction.isEnabledForContext(context));
assertTrue(selectionAction.isEnabledForContext(context));
assertTrue(deleteAction.isEnabledForContext(context));
}
@Test
public void testLocalSymbolActionEnablement() throws Exception {
showSymbolTree();
GTreeNode fNode = rootNode.getChild(2);
util.expandNode(fNode);
GTreeNode gNode = fNode.getChild(1);
util.expandNode(gNode);
GTreeNode pNode = gNode.getChild(9);
util.selectNode(pNode);
ActionContext context = util.getSymbolTreeContext();
assertTrue(cutAction.isEnabledForContext(context));
assertTrue(!pasteAction.isEnabledForContext(context));
assertTrue(renameAction.isEnabledForContext(context));
assertTrue(renameAction.isEnabledForContext(context));
assertTrue(selectionAction.isEnabledForContext(context));
assertTrue(deleteAction.isEnabledForContext(context));
}
@Test
public void testCreateNamespace() throws Exception {
showSymbolTree();
GTreeNode newNsNode = createNewNamespace();
//
// Also, check the editors contents
//
TreePath path = newNsNode.getTreePath();
int row = tree.getRowForPath(path);
DefaultTreeCellEditor cellEditor = (DefaultTreeCellEditor) tree.getCellEditor();
JTree jTree = (JTree) AbstractGenericTest.getInstanceField("tree", tree);
Container container = (Container) cellEditor.getTreeCellEditorComponent(jTree, newNsNode,
true, true, true, row);
JTextField textField = (JTextField) container.getComponent(0);
assertEquals("NewNamespace", textField.getText());
}
	@Test
	public void testRenameNamespace() throws Exception {
		// Rename a freshly created namespace via the in-tree editor and
		// verify both the tree node and the underlying symbol are renamed.
		showSymbolTree();
		GTreeNode newNsNode = createNewNamespace();
		util.selectNode(newNsNode);
		renameSelectedNode();
		TreePath path = newNsNode.getTreePath();
		GTreeNode nsNode = newNsNode;
		String newName = "MyNamespace";
		setEditorText(path, nsNode, newName);
		namespacesNode = rootNode.getChild("Namespaces"); // re-fetch; rename rebuilt the tree
		GTreeNode renamedNode = namespacesNode.getChild(newName);
		assertNotNull(renamedNode);
		Symbol s = ((SymbolNode) newNsNode).getSymbol();
		assertEquals(newName, s.getName());
	}
	@Test
	public void testCreateClass() throws Exception {
		// "Create Class" on the Classes category adds a "NewClass" symbol and
		// leaves the new node in edit mode with its name pre-filled.
		showSymbolTree();
		util.selectNode(rootNode.getChild("Classes"));
		performAction(createClassAction, util.getSymbolTreeContext(), true);
		GTreeNode cnode = rootNode.getChild(4); // Classes category node
		util.expandNode(cnode);
		// wait until NewClass gets added
		GTreeNode newNode = waitForValue(() -> cnode.getChild(0));
		assertNotNull(newNode);
		Symbol s = ((SymbolNode) newNode).getSymbol();
		assertEquals("NewClass", s.getName());
		TreePath path = newNode.getTreePath();
		int row = tree.getRowForPath(path);
		JTree jTree = (JTree) AbstractGenericTest.getInstanceField("tree", tree);
		// pull the cell editor's text field on the Swing thread
		JTextField tf = runSwing(() -> {
			DefaultTreeCellEditor cellEditor = (DefaultTreeCellEditor) tree.getCellEditor();
			Container container = (Container) cellEditor.getTreeCellEditorComponent(jTree, newNode,
				true, true, true, row);
			JTextField textField = (JTextField) container.getComponent(0);
			return textField;
		});
		assertEquals("NewClass", tf.getText());
	}
	@Test
	public void testCreateClassInNamespace() throws Exception {
		// A class can be created nested inside a user-created namespace.
		showSymbolTree();
		GTreeNode nsParentNode = rootNode.getChild(5); // Namespaces category
		util.selectNode(nsParentNode);
		GTreeNode nsNode = util.createObject(nsParentNode, "MyNamespace", createNamespaceAction);
		GTreeNode cNode = util.createObject(nsNode, "MyClass", createClassAction);
		Symbol s = ((SymbolNode) cNode).getSymbol();
		assertEquals("MyClass", s.getName());
	}
	@Test
	public void testRenameExternalLib() throws Exception {
		// An external library node can be renamed in place.
		showSymbolTree();
		GTreeNode extNode = rootNode.getChild(0); // external/Imports category
		util.expandNode(extNode);
		GTreeNode advNode = extNode.getChild(0); // first external library node
		util.rename(advNode, "MyADVAI32.dll");
		Symbol s = ((SymbolNode) advNode).getSymbol();
		assertEquals("MyADVAI32.dll", s.getName());
	}
	@Test
	public void testRenameExternalFunction() throws Exception {
		// A location inside an external library can be renamed.
		showSymbolTree();
		GTreeNode extNode = rootNode.getChild(0); // external/Imports category
		util.expandNode(extNode);
		GTreeNode advNode = extNode.getChild(0); // first external library node
		util.expandNode(advNode);
		GTreeNode regNode = advNode.getChild(1);
		util.rename(regNode, "MyRegCloseKey");
		Symbol s = ((SymbolNode) regNode).getSymbol();
		assertEquals("MyRegCloseKey", s.getName());
	}
	@Test
	public void testRenameLabel() throws Exception {
		// Rename a global label, then verify the rename tracks undo/redo.
		showSymbolTree();
		GTreeNode labelNode = rootNode.getChild(3); // Labels category
		util.expandNode(labelNode);
		GTreeNode isTextUnicodeNode = labelNode.getChild("ADVAPI32.dll_IsTextUnicode");
		Symbol s = ((SymbolNode) isTextUnicodeNode).getSymbol();
		String oldName = s.getName();
		String newName = "MY" + s.getName();
		util.rename(isTextUnicodeNode, newName);
		util.waitForTree();
		assertEquals(newName, s.getName());
		GTreeNode renamedNode = labelNode.getChild("MYADVAPI32.dll_IsTextUnicode");
		assertNotNull(renamedNode);
		// undo/redo
		undo(program);
		util.waitForTree();
		labelNode = rootNode.getChild(3); // re-fetch; undo rebuilt the tree
		isTextUnicodeNode = labelNode.getChild("ADVAPI32.dll_IsTextUnicode");
		s = ((SymbolNode) isTextUnicodeNode).getSymbol();
		assertEquals(oldName, s.getName());
		redo(program);
		util.waitForTree();
		labelNode = rootNode.getChild(3); // re-fetch again after redo
		renamedNode = labelNode.getChild("MYADVAPI32.dll_IsTextUnicode");
		s = ((SymbolNode) renamedNode).getSymbol();
		assertEquals(newName, s.getName());
	}
	@Test
	public void testRenameLabelWithNamespace() throws Exception {
		//
		// The user can type a name with a namespace during a rename. The format is:
		// ns1::ns2::name
		//
		// This will create a new node under the Namespaces node
		//
		showSymbolTree();
		GTreeNode labelNode = rootNode.getChild(3); // Labels category
		util.expandNode(labelNode);
		String advapiName = "ADVAPI32.dll_IsTextUnicode";
		GTreeNode advapi32Node = labelNode.getChild(advapiName);
		Symbol s = ((SymbolNode) advapi32Node).getSymbol();
		String newNamespace = "bob";
		String prefix = "MY";
		String newNameWithoutNamespace = prefix + s.getName();
		String newName = newNamespace + Namespace.DELIMITER + newNameWithoutNamespace;
		util.rename(advapi32Node, newName);
		util.waitForTree();
		// the symbol keeps only the trailing name part of the typed text...
		assertEquals(newNameWithoutNamespace, s.getName());
		// ...and the node moves under the freshly created namespace node
		GTreeNode newNamespaceNode = namespacesNode.getChild(newNamespace);
		assertNotNull(newNamespaceNode);
		GTreeNode renamedNode = newNamespaceNode.getChild(newNameWithoutNamespace);
		assertNotNull(renamedNode);
		assertEquals("MYADVAPI32.dll_IsTextUnicode", renamedNode.toString());
		Symbol renamedSymbol = ((SymbolNode) renamedNode).getSymbol();
		Namespace parentNamespace = renamedSymbol.getParentNamespace();
		String currentNamespaceString = parentNamespace.getName(true);
		assertEquals(newNamespace, currentNamespaceString);
	}
	@Test
	public void testRenameNamespaceWithNamespace() throws Exception {
		// Renaming a namespace to "Outer::Inner" creates the outer namespace
		// and re-parents the renamed one beneath it.
		showSymbolTree();
		GTreeNode newNsNode = createNewNamespace();
		util.selectNode(newNsNode);
		renameSelectedNode();
		TreePath path = newNsNode.getTreePath();
		GTreeNode nsNode = newNsNode;
		String newNamespace = "OuterNamespace";
		String newName = "MyNamespace";
		String newFullName = newNamespace + Namespace.DELIMITER + newName;
		setEditorText(path, nsNode, newFullName);
		namespacesNode = rootNode.getChild("Namespaces"); // re-fetch after rebuild
		GTreeNode newNamespaceNode = namespacesNode.getChild(newNamespace);
		assertNotNull(newNamespaceNode);
		GTreeNode renamedNode = newNamespaceNode.getChild(newName);
		assertNotNull(renamedNode);
		Symbol s = ((SymbolNode) newNsNode).getSymbol();
		assertEquals(newName, s.getName());
	}
	@Test
	public void testRenameParameter() throws Exception {
		// A function parameter node can be renamed directly.
		showSymbolTree();
		GTreeNode fNode = rootNode.getChild(2); // Functions category
		util.expandNode(fNode);
		GTreeNode gNode = fNode.getChild(1);
		util.expandNode(gNode);
		GTreeNode pNode = gNode.getChild(2); // a parameter child of the function
		Symbol s = ((SymbolNode) pNode).getSymbol();
		String newName = "MY" + s.getName();
		util.rename(pNode, newName);
		assertEquals(newName, s.getName());
	}
	@Test
	public void testRenameLocalLabel() throws Exception {
		// A label local to a function can be renamed directly.
		showSymbolTree();
		GTreeNode fNode = rootNode.getChild(2); // Functions category
		util.expandNode(fNode);
		GTreeNode gNode = fNode.getChild(1);
		util.expandNode(gNode);
		GTreeNode node = gNode.getChild(5); // a local-label child of the function
		Symbol s = ((SymbolNode) node).getSymbol();
		String newName = "MY" + s.getName();
		util.rename(node, newName);
		assertEquals(newName, s.getName());
	}
	@Test
	public void testProgramClosed() throws Exception {
		// The symbol tree provider remains visible after the program closes.
		showSymbolTree();
		closeProgram();
		assertTrue(tool.isVisible(util.getProvider()));
	}
//==================================================================================================
// Private Methods
//==================================================================================================
private void doDrag(final GTreeNode destinationNode, GTreeNode dragNode, final int dragAction) {
final GTreeDragNDropHandler dragNDropHandler = tree.getDragNDropHandler();
List<GTreeNode> dropList = new ArrayList<>();
dropList.add(dragNode);
final Transferable transferable = new GTreeNodeTransferable(dragNDropHandler, dropList);
executeOnSwingWithoutBlocking(
() -> dragNDropHandler.drop(destinationNode, transferable, dragAction));
waitForPostedSwingRunnables();
}
	/**
	 * Creates a "NewNamespace" under the Namespaces node via the create
	 * action, commits the in-tree name editor, and returns the new node.
	 */
	private GTreeNode createNewNamespace() throws Exception {
		util.selectNode(namespacesNode);
		util.waitForTree();
		performAction(createNamespaceAction, util.getSymbolTreeContext(), false);
		util.waitForTree();
		GTreeNode nsnode = rootNode.getChild("Namespaces");// get again, as its been modified
		waitForEditing();
		stopEditing();
		GTreeNode newNode = nsnode.getChild("NewNamespace");
		assertNotNull("New node not created", newNode);
		return newNode;
	}
private void waitForEditing() throws Exception {
int cnt = 0;
while (!tree.isEditing()) {
Thread.sleep(100);
assertTrue("Timed-out waiting for tree to edit", ++cnt < 50);
}
}
	/** Commits/ends any active tree cell edit, on the Swing thread. */
	private void stopEditing() throws Exception {
		SwingUtilities.invokeAndWait(() -> tree.stopEditing());
	}
	/**
	 * Invokes the rename action on the current tree selection and waits for
	 * the in-tree name editor to appear.
	 */
	private void renameSelectedNode() throws Exception {
		SwingUtilities.invokeAndWait(
			() -> renameAction.actionPerformed(util.getSymbolTreeContext()));
		waitForEditing();
	}
	/**
	 * Types {@code newName} into the active tree cell editor for
	 * {@code nsNode}, commits the edit, and waits for the resulting tree
	 * rebuild. Runs entirely on the Swing thread.
	 */
	private void setEditorText(final TreePath path, final GTreeNode nsNode, final String newName)
			throws InterruptedException, InvocationTargetException {
		SwingUtilities.invokeAndWait(() -> {
			int row = tree.getRowForPath(path);
			DefaultTreeCellEditor cellEditor = (DefaultTreeCellEditor) tree.getCellEditor();
			JTree jTree = (JTree) AbstractGenericTest.getInstanceField("tree", tree);
			Container container = (Container) cellEditor.getTreeCellEditorComponent(jTree, nsNode,
				true, true, true, row);
			JTextField textField = (JTextField) container.getComponent(0);
			textField.setText(newName);
			tree.stopEditing(); // commit the edit
		});
		flushAndWaitForTree();
	}
	/** Closes the current program via the ProgramManager, on the Swing thread. */
	private void closeProgram() throws Exception {
		final ProgramManager pm = tool.getService(ProgramManager.class);
		SwingUtilities.invokeAndWait(() -> pm.closeProgram());
	}
	/**
	 * Shows the symbol tree provider and caches the root node, the
	 * Namespaces category node, and the tree widget used by the tests.
	 */
	private void showSymbolTree() throws Exception {
		util.showSymbolTree();
		rootNode = util.getRootNode();
		namespacesNode = rootNode.getChild("Namespaces");
		tree = util.getTree();
	}
	/** Looks up (and null-checks) every symbol-tree action under test. */
	private void getActions() throws Exception {
		renameAction = getAction(plugin, "Rename Symbol");
		assertNotNull(renameAction);
		cutAction = getAction(plugin, "Cut SymbolTree Node");
		assertNotNull(cutAction);
		pasteAction = getAction(plugin, "Paste Symbols");
		assertNotNull(pasteAction);
		deleteAction = getAction(plugin, "Delete Symbols");
		assertNotNull(deleteAction);
		selectionAction = getAction(plugin, "Make Selection");
		assertNotNull(selectionAction);
		createClassAction = getAction(plugin, "Create Class");
		assertNotNull(createClassAction);
		createNamespaceAction = getAction(plugin, "Create Namespace");
		assertNotNull(createNamespaceAction);
		createLibraryAction = getAction(plugin, "Create Library");
		assertNotNull(createLibraryAction);
		setExternalProgramAction = getAction(plugin, "Set External Program");
		assertNotNull(setExternalProgramAction);
		createExternalLocationAction = getAction(plugin, "Create External Location");
		assertNotNull(createExternalLocationAction);
		editExternalLocationAction = getAction(plugin, "Edit External Location");
		assertNotNull(editExternalLocationAction);
		goToToggleAction = getAction(plugin, "Navigation");
		assertNotNull(goToToggleAction);
		goToExtLocAction = getAction(plugin, "Go To External Location");
		assertNotNull(goToExtLocAction);
	}
private List<?> getChildren(Namespace namespace, SymbolType type) {
List<Symbol> list = new ArrayList<>();
SymbolIterator it = program.getSymbolTable().getSymbols(namespace);
while (it.hasNext()) {
Symbol s = it.next();
if (s.getSymbolType() == type) {
if (type != SymbolType.LABEL || s.isGlobal()) {
list.add(s);
}
}
}
Collections.sort(list, util.getSymbolComparator());
return list;
}
private List<?> getChildSymbols(Symbol symbol) {
SymbolType type = symbol.getSymbolType();
List<Symbol> list = new ArrayList<>();
SymbolTable symbolTable = program.getSymbolTable();
SymbolIterator iter = symbolTable.getChildren(symbol);
while (iter.hasNext()) {
list.add(iter.next());
}
Collections.sort(list, (type == SymbolType.FUNCTION) ? util.getFunctionComparator()
: util.getSymbolComparator());
return list;
}
	/**
	 * Verifies that the children of {@code parentNode} match
	 * {@code symbolList} one-for-one and in order; for non-leaf nodes, the
	 * fully expanded descendants are checked against the symbol's child
	 * symbols as well.
	 */
	private void checkGTreeNodes(List<?> symbolList, GTreeNode parentNode) throws Exception {
		for (int i = 0; i < symbolList.size(); i++) {
			Symbol s = (Symbol) symbolList.get(i);
			GTreeNode node = parentNode.getChild(i);
			assertEquals(s, ((SymbolNode) node).getSymbol());
			List<Object> nodeList = new ArrayList<>();
			if (!node.isLeaf()) {
				util.expandAll(node, nodeList);
				List<?> subList = getChildSymbols(s);
				assertEquals(subList.size(), nodeList.size());
				for (int j = 0; j < subList.size(); j++) {
					s = (Symbol) subList.get(j);
					GTreeNode dNode = (GTreeNode) nodeList.get(j);
					assertEquals(s, ((SymbolNode) dNode).getSymbol());
				}
			}
		}
	}
	/**
	 * Recursively verifies label nodes under {@code parentNode} against
	 * {@code symbolList}; children that are not SymbolNodes are treated as
	 * grouping nodes and descended into.
	 * NOTE(review): walks the list via the {@code index} field declared
	 * elsewhere in this class -- presumably reset to 0 before the first
	 * call; confirm at the call sites.
	 */
	private void checkLabelNodes(List<?> symbolList, GTreeNode parentNode) {
		for (int i = 0; i < parentNode.getChildCount(); i++) {
			Symbol s = (Symbol) symbolList.get(index);
			GTreeNode node = parentNode.getChild(i);
			if (node instanceof SymbolNode) {
				assertEquals(s, ((SymbolNode) node).getSymbol());
				++index;
			}
			else {
				checkLabelNodes(symbolList, node);
			}
		}
	}
	/** Flushes pending program events, then waits for Swing and the tree. */
	private void flushAndWaitForTree() {
		program.flushEvents();
		waitForPostedSwingRunnables();
		util.waitForTree();
	}
}
| apache-2.0 |
import base64
import gzip
import http.cookiejar as cookielib
import io
import urllib
import urllib.error
import urllib.request
import zlib
# Browser-like default headers attached to every outgoing request; gzip and
# deflate are advertised, so response bodies may arrive compressed.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-us,en;q=0.5",
    "Accept-Charset": "utf-8",
    "Accept-Encoding": "gzip, deflate",
    "Connection": "keep-alive",
}
class http:
    """Small HTTP helper with cookie support and transparent decompression.

    NOTE: the lowercase class name shadows the stdlib ``http`` package name
    in this module; kept for backward compatibility with existing callers.
    """

    def __init__(self, baseUrl, apiKey):
        """Remember base URL / API key and build a cookie-aware opener."""
        self.baseUrl = baseUrl
        self.apiKey = apiKey
        cookies = cookielib.CookieJar()
        self.opener = urllib.request.build_opener(
            urllib.request.HTTPHandler(),
            urllib.request.HTTPRedirectHandler(),
            urllib.request.HTTPCookieProcessor(cookies),
        )

    def getPageFromUrl(self, pageUrl):
        """Fetch ``pageUrl`` and return the decoded response body as text."""
        request = urllib.request.Request(url=pageUrl, headers=headers)
        with self.opener.open(request) as response:
            return self.getResponseBody(response)

    def getResponseBody(self, response):
        """Return the response body as text, inflating gzip/deflate bodies.

        ``response`` needs only ``info()`` (mapping with "Content-Encoding")
        and ``read()``.
        """
        encoding = response.info().get("Content-Encoding")
        if encoding in ("gzip", "x-gzip", "deflate"):
            page = response.read()
            if encoding == "deflate":
                # zlib-wrapped deflate stream (the original referenced zlib
                # without importing it, which raised NameError at runtime).
                return zlib.decompress(page).decode('UTF-8')
            # gzip / x-gzip: context managers replace the try/finally nest
            with io.BytesIO(page) as fd:
                with gzip.GzipFile(fileobj=fd) as data:
                    return data.read().decode('UTF-8')
        return response.read().decode()

    def getAwsFile(self, pageUrl):
        """Fetch raw bytes from ``pageUrl``; return None on an HTTP error."""
        try:
            request = urllib.request.Request(url=pageUrl, headers=headers)
            with self.opener.open(request) as response:
                return response.read()
        except urllib.error.HTTPError:
            return None
StrongMonkey/agent | service/hostapi/exec/exec.go | 3252 | package exec
import (
"encoding/base64"
"io"
"net/url"
log "github.com/Sirupsen/logrus"
"github.com/rancher/websocket-proxy/backend"
"github.com/rancher/websocket-proxy/common"
"github.com/docker/distribution/context"
"github.com/docker/docker/api/types"
"github.com/rancher/agent/service/hostapi/auth"
"github.com/rancher/agent/service/hostapi/events"
"runtime"
)
// Handler serves websocket-proxied "exec" sessions against local Docker
// containers. NOTE(review): presumably registered with the websocket-proxy
// backend as the exec handler -- confirm at the registration site.
type Handler struct {
}
// Handle services one proxied exec session identified by key.
//
// initialMessage is a URL whose "token" query parameter carries a signed
// token; its "exec" claim holds the Docker exec parameters (see convert).
// Frames from incomingMessages are base64-decoded and written to the exec's
// stdin; exec output is base64-encoded and pushed onto response. When the
// incoming channel closes, an EOT byte (0x04) is written and stdin closed.
func (h *Handler) Handle(key string, initialMessage string, incomingMessages <-chan string, response chan<- common.Message) {
	// Always tell the proxy this handler is finished when we return.
	defer backend.SignalHandlerClosed(key, response)
	requestURL, err := url.Parse(initialMessage)
	if err != nil {
		log.WithFields(log.Fields{"error": err, "url": initialMessage}).Error("Couldn't parse url.")
		return
	}
	// Authenticate: the signed token carries the exec parameters.
	tokenString := requestURL.Query().Get("token")
	token, valid := auth.GetAndCheckToken(tokenString)
	if !valid {
		return
	}
	// NOTE(review): this type assertion panics if the "exec" claim is absent
	// or not a map -- presumably guaranteed by the token issuer; confirm.
	execMap := token.Claims["exec"].(map[string]interface{})
	execConfig, id := convert(execMap)
	client, err := events.NewDockerClient()
	if err != nil {
		log.WithFields(log.Fields{"error": err}).Error("Couldn't get docker client.")
		return
	}
	execObj, err := client.ContainerExecCreate(context.Background(), id, execConfig)
	if err != nil {
		return
	}
	hijackResp, err := client.ContainerExecAttach(context.Background(), execObj.ID, execConfig)
	if err != nil {
		return
	}
	// Stdin pump: base64-decode incoming frames and forward to the exec.
	go func(w io.WriteCloser) {
		for {
			msg, ok := <-incomingMessages
			if !ok {
				// Client went away: send EOT and close stdin.
				if _, err := w.Write([]byte("\x04")); err != nil {
					log.WithFields(log.Fields{"error": err}).Error("Error writing EOT message.")
				}
				w.Close()
				return
			}
			data, err := base64.StdEncoding.DecodeString(msg)
			if err != nil {
				log.WithFields(log.Fields{"error": err}).Error("Error decoding message.")
				continue
			}
			w.Write([]byte(data))
		}
	}(hijackResp.Conn)
	// Stdout/stderr pump: base64-encode exec output and push to the proxy.
	go func(r io.Reader) {
		buffer := make([]byte, 4096, 4096)
		for {
			c, err := r.Read(buffer)
			if c > 0 {
				text := base64.StdEncoding.EncodeToString(buffer[:c])
				message := common.Message{
					Key:  key,
					Type: common.Body,
					Body: text,
				}
				response <- message
			}
			if err != nil {
				break
			}
		}
	}(hijackResp.Reader)
	// Blocks forever, so the deferred SignalHandlerClosed never runs here.
	// NOTE(review): presumably session teardown happens externally (proxy or
	// process exit) -- confirm before changing this.
	select {}
}
// convert translates the "exec" claim map from the auth token into a Docker
// ExecConfig plus the target container ID.
//
// Entries that are absent or of the wrong dynamic type are silently ignored,
// matching the original permissive behavior. On Windows the command is
// always forced to "powershell", regardless of any requested Cmd.
func convert(execMap map[string]interface{}) (types.ExecConfig, string) {
	config := types.ExecConfig{}
	containerID := ""

	// The four attach/tty flags all follow the same extraction pattern.
	config.AttachStdin = boolParam(execMap, "AttachStdin", false)
	config.AttachStdout = boolParam(execMap, "AttachStdout", false)
	config.AttachStderr = boolParam(execMap, "AttachStderr", false)
	config.Tty = boolParam(execMap, "Tty", false)

	if param, ok := execMap["Container"]; ok {
		if val, ok := param.(string); ok {
			containerID = val
		}
	}

	if param, ok := execMap["Cmd"]; ok {
		// Keep only the string elements; a malformed list yields an empty
		// (non-nil) Cmd, exactly as before.
		cmd := []string{}
		if list, ok := param.([]interface{}); ok {
			for _, item := range list {
				if val, ok := item.(string); ok {
					cmd = append(cmd, val)
				}
			}
		}
		config.Cmd = cmd
	}

	if runtime.GOOS == "windows" {
		config.Cmd = []string{"powershell"}
	}

	return config, containerID
}

// boolParam returns the bool stored under key in m, or fallback when the key
// is absent or its value is not a bool.
func boolParam(m map[string]interface{}, key string, fallback bool) bool {
	if v, ok := m[key]; ok {
		if b, ok := v.(bool); ok {
			return b
		}
	}
	return fallback
}
| apache-2.0 |
shuodata/deeplearning4j | deeplearning4j-nn/src/main/java/org/deeplearning4j/berkeley/SloppyMath.java | 35547 | /*-
*
* * Copyright 2015 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.deeplearning4j.berkeley;
import java.util.List;
import java.util.Map;
/**
* The class <code>SloppyMath</code> contains methods for performing basic
* numeric operations. In some cases, such as max and min, they cut a few
* corners in the implementation for the sake of efficiency. In particular, they
* may not handle special notions like NaN and -0.0 correctly. This was the
* origin of the class name, but some other operations are just useful math
* additions, such as logSum.
*
* @author Christopher Manning
* @version 2003/01/02
*/
public final class SloppyMath {
	private SloppyMath() {} // utility class: all members static, no instances
public static double abs(double x) {
if (x > 0)
return x;
return -1.0 * x;
}
public static double lambert(double v, double u) {
double x = -(Math.log(-v) + u);//-Math.log(-z);
double w = -x;
double diff = 1;
while (Math.abs(diff) < 1.0e-5) {
double z = -x - Math.log(Math.abs(w));
diff = z - w;
w = z;
}
return w;
/*
//Use asymptotic expansion w = log(z) - log(log(z)) for most z
double summand = (z==0) ? 1 : 0;
double tmp = Math.log(z+summand);// + i*b*6.28318530717958648;
double w = tmp - Math.log(tmp + summand);
//For b = 0, use a series expansion when close to the branch point
//k = find(b == 0 & abs(z + 0.3678794411714423216) <= 1.5);
tmp = Math.sqrt(5.43656365691809047*z + 2) - 1;// + i*b*6.28318530717958648;
//w(k) = tmp(k);
w = tmp;
for (int k=1; k<36; k++){
// Converge with Halley's iterations, about 5 iterations satisfies
//the tolerance for most z
double c1 = Math.exp(w);
double c2 = w*c1 - z;
summand = (w != -1) ? 1 : 0;
double w1 = w + summand;
double dw = c2/(c1*w1 - ((w + 2)*c2/(2*w1)));
w = w - dw;
if (Math.abs(dw) < 0.7e-16*(2+Math.abs(w)))
break;
}
return w;*/
}
	/**
	 * Returns the maximum of three int values. (The previous doc comment
	 * incorrectly said "minimum".)
	 */
	public static int max(int a, int b, int c) {
		int ma;
		ma = a;
		if (b > ma) {
			ma = b;
		}
		if (c > ma) {
			ma = c;
		}
		return ma;
	}
/**
* Returns the minimum of three int values.
*/
public static int min(int a, int b, int c) {
int mi;
mi = a;
if (b < mi) {
mi = b;
}
if (c < mi) {
mi = c;
}
return mi;
}
/**
* Returns the greater of two <code>float</code> values. That is, the
* result is the argument closer to positive infinity. If the arguments have
* the same value, the result is that same value. Does none of the special
* checks for NaN or -0.0f that <code>Math.max</code> does.
*
* @param a
* an argument.
* @param b
* another argument.
* @return the larger of <code>a</code> and <code>b</code>.
*/
public static float max(float a, float b) {
return (a >= b) ? a : b;
}
/**
* Returns the greater of two <code>double</code> values. That is, the
* result is the argument closer to positive infinity. If the arguments have
* the same value, the result is that same value. Does none of the special
* checks for NaN or -0.0f that <code>Math.max</code> does.
*
* @param a
* an argument.
* @param b
* another argument.
* @return the larger of <code>a</code> and <code>b</code>.
*/
public static double max(double a, double b) {
return (a >= b) ? a : b;
}
/**
* Returns the smaller of two <code>float</code> values. That is, the
* result is the value closer to negative infinity. If the arguments have
* the same value, the result is that same value. Does none of the special
* checks for NaN or -0.0f that <code>Math.max</code> does.
*
* @param a
* an argument.
* @param b
* another argument.
* @return the smaller of <code>a</code> and <code>b.</code>
*/
public static float min(float a, float b) {
return (a <= b) ? a : b;
}
/**
* Returns the smaller of two <code>double</code> values. That is, the
* result is the value closer to negative infinity. If the arguments have
* the same value, the result is that same value. Does none of the special
* checks for NaN or -0.0f that <code>Math.max</code> does.
*
* @param a
* an argument.
* @param b
* another argument.
* @return the smaller of <code>a</code> and <code>b</code>.
*/
public static double min(double a, double b) {
return (a <= b) ? a : b;
}
/**
* Returns true if the argument is a "dangerous" double to have around,
* namely one that is infinite, NaN or zero.
*/
public static boolean isDangerous(double d) {
return Double.isInfinite(d) || Double.isNaN(d) || d == 0.0;
}
public static boolean isDangerous(float d) {
return Float.isInfinite(d) || Float.isNaN(d) || d == 0.0;
}
public static boolean isGreater(double x, double y) {
if (x > 1)
return (((x - y) / x) > -0.01);
return ((x - y) > -0.0001);
}
/**
* Returns true if the argument is a "very dangerous" double to have around,
* namely one that is infinite or NaN.
*/
public static boolean isVeryDangerous(double d) {
return Double.isInfinite(d) || Double.isNaN(d);
}
	/**
	 * Returns |a - b| divided by the smaller of |a| and |b|.
	 * Note: when the smaller magnitude is 0 this yields Infinity (or NaN if
	 * both are 0); callers must tolerate that. (The misspelled method name
	 * is part of the public API and is kept.)
	 */
	public static double relativeDifferance(double a, double b) {
		a = Math.abs(a);
		b = Math.abs(b);
		double absMin = Math.min(a, b);
		return Math.abs(a - b) / absMin;
	}
public static boolean isDiscreteProb(double d, double tol) {
return d >= 0.0 && d <= 1.0 + tol;
}
	/**
	 * If a difference is bigger than this in log terms, then the sum or
	 * difference of them will just be the larger (to 12 or so decimal places
	 * for double, and 7 or 8 for float).
	 */
	public static final double LOGTOLERANCE = 30.0;

	/** Float-precision analogue of {@link #LOGTOLERANCE}. */
	static final float LOGTOLERANCE_F = 10.0f;
/**
* Returns the log of the sum of two numbers, which are themselves input in
* log form. This uses natural logarithms. Reasonable care is taken to do
* this as efficiently as possible (under the assumption that the numbers
* might differ greatly in magnitude), with high accuracy, and without
* numerical overflow. Also, handle correctly the case of arguments being
* -Inf (e.g., probability 0).
*
* @param lx
* First number, in log form
* @param ly
* Second number, in log form
* @return log(exp(lx) + exp(ly))
*/
public static float logAdd(float lx, float ly) {
float max, negDiff;
if (lx > ly) {
max = lx;
negDiff = ly - lx;
} else {
max = ly;
negDiff = lx - ly;
}
if (max == Double.NEGATIVE_INFINITY || negDiff < -LOGTOLERANCE_F) {
return max;
} else {
return max + (float) Math.log(1.0f + Math.exp(negDiff));
}
}
/**
* Returns the log of the sum of two numbers, which are themselves input in
* log form. This uses natural logarithms. Reasonable care is taken to do
* this as efficiently as possible (under the assumption that the numbers
* might differ greatly in magnitude), with high accuracy, and without
* numerical overflow. Also, handle correctly the case of arguments being
* -Inf (e.g., probability 0).
*
* @param lx
* First number, in log form
* @param ly
* Second number, in log form
* @return log(exp(lx) + exp(ly))
*/
public static double logAdd(double lx, double ly) {
double max, negDiff;
if (lx > ly) {
max = lx;
negDiff = ly - lx;
} else {
max = ly;
negDiff = lx - ly;
}
if (max == Double.NEGATIVE_INFINITY || negDiff < -LOGTOLERANCE) {
return max;
} else {
return max + Math.log(1.0 + Math.exp(negDiff));
}
}
public static double logAdd(float[] logV) {
double maxIndex = 0;
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < logV.length; i++) {
if (logV[i] > max) {
max = logV[i];
maxIndex = i;
}
}
if (max == Double.NEGATIVE_INFINITY)
return Double.NEGATIVE_INFINITY;
// compute the negative difference
double threshold = max - LOGTOLERANCE;
double sumNegativeDifferences = 0.0;
for (int i = 0; i < logV.length; i++) {
if (i != maxIndex && logV[i] > threshold) {
sumNegativeDifferences += Math.exp(logV[i] - max);
}
}
if (sumNegativeDifferences > 0.0) {
return max + Math.log(1.0 + sumNegativeDifferences);
} else {
return max;
}
}
public static void logNormalize(double[] logV) {
double logSum = logAdd(logV);
if (Double.isNaN(logSum)) {
throw new RuntimeException("Bad log-sum");
}
if (logSum == 0.0)
return;
for (int i = 0; i < logV.length; i++) {
logV[i] -= logSum;
}
}
public static double logAdd(double[] logV) {
double maxIndex = 0;
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < logV.length; i++) {
if (logV[i] > max) {
max = logV[i];
maxIndex = i;
}
}
if (max == Double.NEGATIVE_INFINITY)
return Double.NEGATIVE_INFINITY;
// compute the negative difference
double threshold = max - LOGTOLERANCE;
double sumNegativeDifferences = 0.0;
for (int i = 0; i < logV.length; i++) {
if (i != maxIndex && logV[i] > threshold) {
sumNegativeDifferences += Math.exp(logV[i] - max);
}
}
if (sumNegativeDifferences > 0.0) {
return max + Math.log(1.0 + sumNegativeDifferences);
} else {
return max;
}
}
public static double logAdd(List<Double> logV) {
double max = Double.NEGATIVE_INFINITY;
double maxIndex = 0;
for (int i = 0; i < logV.size(); i++) {
if (logV.get(i) > max) {
max = logV.get(i);
maxIndex = i;
}
}
if (max == Double.NEGATIVE_INFINITY)
return Double.NEGATIVE_INFINITY;
// compute the negative difference
double threshold = max - LOGTOLERANCE;
double sumNegativeDifferences = 0.0;
for (int i = 0; i < logV.size(); i++) {
if (i != maxIndex && logV.get(i) > threshold) {
sumNegativeDifferences += Math.exp(logV.get(i) - max);
}
}
if (sumNegativeDifferences > 0.0) {
return max + Math.log(1.0 + sumNegativeDifferences);
} else {
return max;
}
}
public static float logAdd_Old(float[] logV) {
float max = Float.NEGATIVE_INFINITY;
float maxIndex = 0;
for (int i = 0; i < logV.length; i++) {
if (logV[i] > max) {
max = logV[i];
maxIndex = i;
}
}
if (max == Float.NEGATIVE_INFINITY)
return Float.NEGATIVE_INFINITY;
// compute the negative difference
float threshold = max - LOGTOLERANCE_F;
float sumNegativeDifferences = 0.0f;
for (int i = 0; i < logV.length; i++) {
if (i != maxIndex && logV[i] > threshold) {
sumNegativeDifferences += Math.exp(logV[i] - max);
}
}
if (sumNegativeDifferences > 0.0) {
return max + (float) Math.log(1.0f + sumNegativeDifferences);
} else {
return max;
}
}
/*
* adds up the entries logV[0], logV[1], ... , logV[lastIndex-1]
*/
public static float logAdd(float[] logV, int lastIndex) {
if (lastIndex == 0)
return Float.NEGATIVE_INFINITY;
float max = Float.NEGATIVE_INFINITY;
float maxIndex = 0;
for (int i = 0; i < lastIndex; i++) {
if (logV[i] > max) {
max = logV[i];
maxIndex = i;
}
}
if (max == Float.NEGATIVE_INFINITY)
return Float.NEGATIVE_INFINITY;
// compute the negative difference
float threshold = max - LOGTOLERANCE_F;
double sumNegativeDifferences = 0.0;
for (int i = 0; i < lastIndex; i++) {
if (i != maxIndex && logV[i] > threshold) {
sumNegativeDifferences += Math.exp((logV[i] - max));
}
}
if (sumNegativeDifferences > 0.0) {
return max + (float) Math.log(1.0 + sumNegativeDifferences);
} else {
return max;
}
}
/*
* adds up the entries logV[0], logV[1], ... , logV[lastIndex-1]
*/
public static double logAdd(double[] logV, int lastIndex) {
if (lastIndex == 0)
return Double.NEGATIVE_INFINITY;
double max = Double.NEGATIVE_INFINITY;
double maxIndex = 0;
for (int i = 0; i < lastIndex; i++) {
if (logV[i] > max) {
max = logV[i];
maxIndex = i;
}
}
if (max == Double.NEGATIVE_INFINITY)
return Double.NEGATIVE_INFINITY;
// compute the negative difference
double threshold = max - LOGTOLERANCE;
double sumNegativeDifferences = 0.0;
for (int i = 0; i < lastIndex; i++) {
if (i != maxIndex && logV[i] > threshold) {
sumNegativeDifferences += Math.exp((logV[i] - max));
}
}
if (sumNegativeDifferences > 0.0) {
return max + Math.log(1.0 + sumNegativeDifferences);
} else {
return max;
}
}
/**
* Similar to logAdd, but without the final log. I.e. Sum_i exp(logV_i)
*
* @param logV
* @return
*/
public static float addExp_Old(float[] logV) {
float max = Float.NEGATIVE_INFINITY;
float maxIndex = 0;
for (int i = 0; i < logV.length; i++) {
if (logV[i] > max) {
max = logV[i];
maxIndex = i;
}
}
if (max == Float.NEGATIVE_INFINITY)
return Float.NEGATIVE_INFINITY;
// compute the negative difference
float threshold = max - LOGTOLERANCE_F;
float sumNegativeDifferences = 0.0f;
for (int i = 0; i < logV.length; i++) {
if (i != maxIndex && logV[i] > threshold) {
sumNegativeDifferences += Math.exp(logV[i] - max);
}
}
return (float) Math.exp(max) * (1.0f + sumNegativeDifferences);
}
/*
* adds up the entries logV[0], logV[1], ... , logV[lastIndex-1]
*/
public static float addExp(float[] logV, int lastIndex) {
if (lastIndex == 0)
return Float.NEGATIVE_INFINITY;
float max = Float.NEGATIVE_INFINITY;
float maxIndex = 0;
for (int i = 0; i < lastIndex; i++) {
if (logV[i] > max) {
max = logV[i];
maxIndex = i;
}
}
if (max == Float.NEGATIVE_INFINITY)
return Float.NEGATIVE_INFINITY;
// compute the negative difference
float threshold = max - LOGTOLERANCE_F;
float sumNegativeDifferences = 0.0f;
for (int i = 0; i < lastIndex; i++) {
if (i != maxIndex && logV[i] > threshold) {
sumNegativeDifferences += Math.exp(logV[i] - max);
}
}
return (float) Math.exp(max) * (1.0f + sumNegativeDifferences);
}
/**
* Computes n choose k in an efficient way. Works with k == 0 or k == n but
* undefined if k < 0 or k > n
*
* @param n
* @param k
* @return fact(n) / fact(k) * fact(n-k)
*/
public static int nChooseK(int n, int k) {
k = Math.min(k, n - k);
if (k == 0) {
return 1;
}
int accum = n;
for (int i = 1; i < k; i++) {
accum *= (n - i);
accum /= i;
}
return accum / k;
}
/**
* exponentiation like we learned in grade school: multiply b by itself e
* times. Uses power of two trick. e must be nonnegative!!! no checking!!!
*
* @param b
* base
* @param e
* exponent
* @return b^e
*/
public static int intPow(int b, int e) {
if (e == 0) {
return 1;
}
int result = 1;
int currPow = b;
do {
if ((e & 1) == 1)
result *= currPow;
currPow = currPow * currPow;
e >>= 1;
} while (e > 0);
return result;
}
/**
* exponentiation like we learned in grade school: multiply b by itself e
* times. Uses power of two trick. e must be nonnegative!!! no checking!!!
*
* @param b
* base
* @param e
* exponent
* @return b^e
*/
public static float intPow(float b, int e) {
if (e == 0) {
return 1;
}
float result = 1;
float currPow = b;
do {
if ((e & 1) == 1)
result *= currPow;
currPow = currPow * currPow;
e >>= 1;
} while (e > 0);
return result;
}
/**
* exponentiation like we learned in grade school: multiply b by itself e
* times. Uses power of two trick. e must be nonnegative!!! no checking!!!
*
* @param b
* base
* @param e
* exponent
* @return b^e
*/
public static double intPow(double b, int e) {
if (e == 0) {
return 1;
}
float result = 1;
double currPow = b;
do {
if ((e & 1) == 1)
result *= currPow;
currPow = currPow * currPow;
e >>= 1;
} while (e > 0);
return result;
}
/**
* Find a hypergeometric distribution. This uses exact math, trying fairly
* hard to avoid numeric overflow by interleaving multiplications and
* divisions. (To do: make it even better at avoiding overflow, by using
* loops that will do either a multiple or divide based on the size of the
* intermediate result.)
*
* @param k
* The number of black balls drawn
* @param n
* The total number of balls
* @param r
* The number of black balls
* @param m
* The number of balls drawn
* @return The hypergeometric value
*/
	public static double hypergeometric(int k, int n, int r, int m) {
		// Validate the table. NOTE(review): the last clause uses the
		// non-short-circuit '|'; harmless since both operands are pure,
		// but presumably '||' was intended.
		if (k < 0 || r > n || m > n || n <= 0 || m < 0 | r < 0) {
			throw new IllegalArgumentException("Invalid hypergeometric");
		}
		// exploit symmetry of problem
		if (m > n / 2) {
			m = n - m;
			k = r - k;
		}
		if (r > n / 2) {
			r = n - r;
			k = m - k;
		}
		if (m > r) {
			int temp = m;
			m = r;
			r = temp;
		}
		// now we have that k <= m <= r <= n/2
		// k outside the feasible range has probability zero
		if (k < (m + r) - n || k > m) {
			return 0.0;
		}
		// Do limit cases explicitly
		// It's unclear whether this is a good idea. I put it in fearing
		// numerical errors when the numbers seemed off, but actually there
		// was a bug in the Fisher's exact routine.
		if (r == n) {
			if (k == m) {
				return 1.0;
			} else {
				return 0.0;
			}
		} else if (r == n - 1) {
			if (k == m) {
				return (n - m) / (double) n;
			} else if (k == m - 1) {
				return m / (double) n;
			} else {
				return 0.0;
			}
		} else if (m == 1) {
			if (k == 0) {
				return (n - r) / (double) n;
			} else if (k == 1) {
				return r / (double) n;
			} else {
				return 0.0;
			}
		} else if (m == 0) {
			if (k == 0) {
				return 1.0;
			} else {
				return 0.0;
			}
		} else if (k == 0) {
			// P(X = 0) as a running product: C(n-r, m) / C(n, m)
			double ans = 1.0;
			for (int m0 = 0; m0 < m; m0++) {
				ans *= ((n - r) - m0);
				ans /= (n - m0);
			}
			return ans;
		}
		// General case: interleave multiplications and divisions so the
		// intermediate value stays near the final probability.
		double ans = 1.0;
		// do (n-r)x...x((n-r)-((m-k)-1))/n x...x (n-((m-k-1)))
		// leaving rest of denominator to getFromOrigin to multimply by (n-(m-1))
		// that's k things which goes into next loop
		for (int nr = n - r, n0 = n; nr > (n - r) - (m - k); nr--, n0--) {
			// System.out.println("Multiplying by " + nr);
			ans *= nr;
			// System.out.println("Dividing by " + n0);
			ans /= n0;
		}
		// System.out.println("Done phase 1");
		for (int k0 = 0; k0 < k; k0++) {
			ans *= (m - k0);
			// System.out.println("Multiplying by " + (m-k0));
			ans /= ((n - (m - k0)) + 1);
			// System.out.println("Dividing by " + ((n-(m+k0)+1)));
			ans *= (r - k0);
			// System.out.println("Multiplying by " + (r-k0));
			ans /= (k0 + 1);
			// System.out.println("Dividing by " + (k0+1));
		}
		return ans;
	}
/**
* Find a one tailed exact binomial test probability. Finds the chance of
* this or a higher result
*
* @param k
* number of successes
* @param n
* Number of trials
* @param p
* Probability of a success
*/
public static double exactBinomial(int k, int n, double p) {
double total = 0.0;
for (int m = k; m <= n; m++) {
double nChooseM = 1.0;
for (int r = 1; r <= m; r++) {
nChooseM *= (n - r) + 1;
nChooseM /= r;
}
// System.out.println(n + " choose " + m + " is " + nChooseM);
// System.out.println("prob contribution is " +
// (nChooseM * Math.pow(p, m) * Math.pow(1.0-p, n - m)));
total += nChooseM * Math.pow(p, m) * Math.pow(1.0 - p, n - m);
}
return total;
}
/**
* Find a one-tailed Fisher's exact probability. Chance of having seen this
* or a more extreme departure from what you would have expected given
* independence. I.e., k >= the value passed in. Warning: this was done just
* for collocations, where you are concerned with the case of k being larger
* than predicted. It doesn't correctly handle other cases, such as k being
* smaller than expected.
*
* @param k
* The number of black balls drawn
* @param n
* The total number of balls
* @param r
* The number of black balls
* @param m
* The number of balls drawn
* @return The Fisher's exact p-value
*/
	public static double oneTailedFishersExact(int k, int n, int r, int m) {
		if (k < 0 || k < (m + r) - n || k > r || k > m || r > n || m > n) {
			throw new IllegalArgumentException("Invalid Fisher's exact: " + "k=" + k + " n=" + n + " r=" + r + " m=" + m
					+ " k<0=" + (k < 0) + " k<(m+r)-n=" + (k < (m + r) - n) + " k>r=" + (k > r) + " k>m="
					+ (k > m) + " r>n=" + (r > n) + "m>n=" + (m > n));
		}
		// exploit symmetry of problem
		if (m > n / 2) {
			m = n - m;
			k = r - k;
		}
		if (r > n / 2) {
			r = n - r;
			k = m - k;
		}
		if (m > r) {
			int temp = m;
			m = r;
			r = temp;
		}
		// now we have that k <= m <= r <= n/2
		// Sum the hypergeometric tail on whichever side has fewer terms;
		// when summing the complement, subtract from 1 at the end.
		double total = 0.0;
		if (k > m / 2) {
			// sum from k to m
			for (int k0 = k; k0 <= m; k0++) {
				// System.out.println("Calling hypg(" + k0 + "; " + n +
				// ", " + r + ", " + m + ")");
				total += SloppyMath.hypergeometric(k0, n, r, m);
			}
		} else {
			// sum from max(0, (m+r)-n) to k-1, and then subtract from 1
			int min = Math.max(0, (m + r) - n);
			for (int k0 = min; k0 < k; k0++) {
				// System.out.println("Calling hypg(" + k0 + "; " + n +
				// ", " + r + ", " + m + ")");
				total += SloppyMath.hypergeometric(k0, n, r, m);
			}
			total = 1.0 - total;
		}
		return total;
	}
/**
* Find a 2x2 chi-square value. Note: could do this more neatly using
* simplified formula for 2x2 case.
*
* @param k
* The number of black balls drawn
* @param n
* The total number of balls
* @param r
* The number of black balls
* @param m
* The number of balls drawn
* @return The Fisher's exact p-value
*/
public static double chiSquare2by2(int k, int n, int r, int m) {
int[][] cg = {{k, r - k}, {m - k, n - (k + (r - k) + (m - k))}};
int[] cgr = {r, n - r};
int[] cgc = {m, n - m};
double total = 0.0;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
double exp = (double) cgr[i] * cgc[j] / n;
total += (cg[i][j] - exp) * (cg[i][j] - exp) / exp;
}
}
return total;
}
public static double exp(double logX) {
// if x is very near one, use the linear approximation
if (Math.abs(logX) < 0.001)
return 1 + logX;
return Math.exp(logX);
}
/**
* Tests the hypergeometric distribution code, or other cooccurrences provided
* in this module.
*
* @param args
* Either none, and the log add rountines are tested, or the
* following 4 arguments: k (cell), n (total), r (row), m (col)
*/
	public static void main(String[] args) {
		// Currently just exercises approxLog; the original CLI test harness
		// (-logAdd / -fishers / -binomial / -approxExp / -approxLog modes)
		// is kept below, commented out.
		System.out.println(approxLog(0.0));
		// if (args.length == 0) {
		// System.err.println("Usage: java edu.stanford.nlp.math.SloppyMath " + "[-logAdd|-fishers k n r m|-bionomial r n p");
		// } else if (args[0].equals("-logAdd")) {
		// System.out.println("Log adds of neg infinity numbers, etc.");
		// System.out.println("(logs) -Inf + -Inf = " + logAdd(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY));
		// System.out.println("(logs) -Inf + -7 = " + logAdd(Double.NEGATIVE_INFINITY, -7.0));
		// System.out.println("(logs) -7 + -Inf = " + logAdd(-7.0, Double.NEGATIVE_INFINITY));
		// System.out.println("(logs) -50 + -7 = " + logAdd(-50.0, -7.0));
		// System.out.println("(logs) -11 + -7 = " + logAdd(-11.0, -7.0));
		// System.out.println("(logs) -7 + -11 = " + logAdd(-7.0, -11.0));
		// System.out.println("real 1/2 + 1/2 = " + logAdd(Math.log(0.5), Math.log(0.5)));
		// } else if (args[0].equals("-fishers")) {
		// int k = Integer.parseInt(args[1]);
		// int n = Integer.parseInt(args[2]);
		// int r = Integer.parseInt(args[3]);
		// int m = Integer.parseInt(args[4]);
		// double ans = SloppyMath.hypergeometric(k, n, r, m);
		// System.out.println("hypg(" + k + "; " + n + ", " + r + ", " + m + ") = " + ans);
		// ans = SloppyMath.oneTailedFishersExact(k, n, r, m);
		// System.out.println("1-tailed Fisher's exact(" + k + "; " + n + ", " + r + ", " + m + ") = " + ans);
		// double ansChi = SloppyMath.chiSquare2by2(k, n, r, m);
		// System.out.println("chiSquare(" + k + "; " + n + ", " + r + ", " + m + ") = " + ansChi);
		//
		// System.out.println("Swapping arguments should give same hypg:");
		// ans = SloppyMath.hypergeometric(k, n, r, m);
		// System.out.println("hypg(" + k + "; " + n + ", " + m + ", " + r + ") = " + ans);
		// int othrow = n - m;
		// int othcol = n - r;
		// int cell12 = m - k;
		// int cell21 = r - k;
		// int cell22 = othrow - (r - k);
		// ans = SloppyMath.hypergeometric(cell12, n, othcol, m);
		// System.out.println("hypg(" + cell12 + "; " + n + ", " + othcol + ", " + m + ") = " + ans);
		// ans = SloppyMath.hypergeometric(cell21, n, r, othrow);
		// System.out.println("hypg(" + cell21 + "; " + n + ", " + r + ", " + othrow + ") = " + ans);
		// ans = SloppyMath.hypergeometric(cell22, n, othcol, othrow);
		// System.out.println("hypg(" + cell22 + "; " + n + ", " + othcol + ", " + othrow + ") = " + ans);
		// } else if (args[0].equals("-binomial")) {
		// int k = Integer.parseInt(args[1]);
		// int n = Integer.parseInt(args[2]);
		// double p = Double.parseDouble(args[3]);
		// double ans = SloppyMath.exactBinomial(k, n, p);
		// System.out.println("Binomial p(X >= " + k + "; " + n + ", " + p + ") = " + ans);
		// }
		// else if (args[0].equals("-approxExp"))
		// {
		// int numTrials = 0;
		// double sumError = 0;
		// double maxError = 0;
		// for (double x = -700; x < 700; x += 0.1)
		// {
		// final double approxExp = approxExp(x);
		// final double exp = Math.exp(x);
		// double error = Math.abs((exp - approxExp) / exp);
		// if (isVeryDangerous(error)) continue;
		// maxError = Math.max(error,maxError);
		// sumError += error;
		// numTrials++;
		// }
		// double avgError = sumError / numTrials;
		// System.out.println("Avg error was: " + avgError);
		// System.out.println("Max error was: " + maxError);
		// }
		// else if (args[0].equals("-approxLog"))
		// {
		// int numTrials = 0;
		// double sumError = 0;
		// double maxError = 0;
		// double x = Double.MIN_VALUE;
		// while (x < Double.MAX_VALUE)
		// {
		// // if (Math.abs(x - 1) < 0.3) continue;
		// final double approxExp = approxLog(x);
		// final double exp = Math.log(x);
		// double error = Math.abs((exp - approxExp) / exp);
		// if (isVeryDangerous(error)) continue;
		// maxError = Math.max(error,maxError);
		// sumError += error;
		// numTrials++;
		//
		// if (x < Double.MIN_VALUE * 1000000)
		// x *= 4;
		// else x *= 1.0001;
		// }
		// double avgError = sumError / numTrials;
		// System.out.println("Avg error was: " + avgError);
		// System.out.println("Max error was: " + maxError);
		//
		//
		// } else {
		// System.err.println("Unknown option: " + args[0]);
		// }
	}
public static double noNaNDivide(double num, double denom) {
return denom == 0.0 ? 0.0 : num / denom;
}
	public static double approxLog(double val) {
		// Fast approximate natural log: negative input -> NaN, zero -> -Inf,
		// values near 1 use a 3-term Taylor series, everything else uses a
		// bit-level trick on the IEEE-754 representation.
		if (val < 0.0)
			return Double.NaN;
		if (val == 0.0)
			return Double.NEGATIVE_INFINITY;
		double r = val - 1;
		if (Math.abs(r) < 0.3) {
			// use first few terms of taylor series
			final double rSquared = r * r;
			return r - rSquared / 2 + rSquared * r / 3;
		}
		// Schraudolph-style trick: the high 32 bits of the double are
		// approximately 1072632447 + 1512775 * ln(val); invert that line.
		final double x = (Double.doubleToLongBits(val) >> 32);
		return (x - 1072632447) / 1512775;
	}
	public static double approxExp(double val) {
		// Fast approximate exp: linear approximation 1 + val near zero,
		// otherwise the Schraudolph bit trick — write an approximation of
		// 1072693248 - 60801 + 1512775 * val into the double's high word.
		if (Math.abs(val) < 0.1)
			return 1 + val;
		final long tmp = (long) (1512775 * val + (1072693248 - 60801));
		return Double.longBitsToDouble(tmp << 32);
	}
	public static double approxPow(final double a, final double b) {
		// Approximate a^b by scaling the exponent bits of a: since the high
		// word is roughly linear in log2(a), multiplying its offset by b
		// approximates exponentiation.
		final int tmp = (int) (Double.doubleToLongBits(a) >> 32);
		final int tmp2 = (int) (b * (tmp - 1072632447) + 1072632447);
		return Double.longBitsToDouble(((long) tmp2) << 32);
	}
	public static double logSubtract(double a, double b) {
		// Computes log(exp(a) - exp(b)) by factoring out the larger term:
		// log(exp(a) - exp(b)) = a + log(1 - exp(b - a)) when a > b.
		if (a > b) {
			return a + Math.log(1.0 - Math.exp(b - a));
		} else {
			// NOTE(review): when a <= b the true difference is <= 0, so this
			// branch takes the log of a non-positive value and yields NaN
			// (or -Inf when a == b); presumably callers guarantee a > b.
			return b + Math.log(-1.0 + Math.exp(a - b));
		}
	}
	public static double unsafeSubtract(double a, double b) {
		// Subtraction that defines inf - inf (and -inf - -inf) as 0 rather
		// than NaN. Equal finite arguments also take the first branch,
		// where returning 0 matches ordinary subtraction.
		if (a == b) { // inf - inf (or -inf - -inf)
			return 0.0;
		}
		// -inf minus anything else stays -inf.
		if (a == Double.NEGATIVE_INFINITY) {
			return a;
		}
		return a - b;
	}
public static double unsafeAdd(double a, double b) {
if (a == b) { // inf - inf (or -inf - -inf)
return 0.0;
}
if (a == Double.POSITIVE_INFINITY) {
return a;
}
return a + b;
}
	public static <T> double logAdd(Counter<T> counts) {
		// Log-sum-exp over a Counter's values: copy the (log-space) values
		// into an array and delegate to logAdd(double[]).
		// NOTE(review): assumes Counter<T> exposes entrySet() of
		// Map.Entry<T, Float> — confirm against the Counter implementation.
		double[] arr = new double[counts.size()];
		int index = 0;
		for (Map.Entry<T, Float> entry : counts.entrySet()) {
			arr[index++] = entry.getValue();
		}
		return SloppyMath.logAdd(arr);
	}
// public static double approxLogAdd(double a, double b)
// {
//
// final long tmp1 = (long) (1512775 * a + (1072693248 - 60801));
// double ea = Double.longBitsToDouble(tmp1 << 32);
// final long tmp2 = (long) (1512775 * b + (1072693248 - 60801));
// double eb = Double.longBitsToDouble(tmp2 << 32);
//
// final double x = (Double.doubleToLongBits(ea + eb) >> 32);
// return (x - 1072632447) / 1512775;
//
// }
}
| apache-2.0 |
moparisthebest/beehive | beehive-netui-core/src/main/java/org/apache/beehive/netui/pageflow/PageFlowException.java | 3137 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* $Header:$
*/
package org.apache.beehive.netui.pageflow;
import org.apache.beehive.netui.util.Bundle;
import org.apache.beehive.netui.pageflow.internal.InternalUtils;
import org.apache.struts.action.ActionMapping;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
/**
 * Base class for PageFlow-related Exceptions. Records the name of the
 * action being processed when the exception arose, alongside the
 * {@link FlowController} tracked by the superclass.
 */
public abstract class PageFlowException
        extends PageFlowManagedObjectException
{
    private String _actionName;

    protected PageFlowException( String actionName, FlowController fc )
    {
        super( fc );
        init( actionName );
    }

    protected PageFlowException( String actionName, FlowController fc, Throwable cause )
    {
        super( fc, cause );
        init( actionName );
    }

    protected void init( String actionName )
    {
        // Store the action name without any leading '/'.
        if ( actionName != null && actionName.startsWith( "/" ) )
        {
            _actionName = actionName.substring( 1 );
        }
        else
        {
            _actionName = actionName;
        }
    }

    /**
     * Get the related FlowController.
     *
     * @return the {@link FlowController} associated with this exception.
     */
    public FlowController getFlowController()
    {
        return ( FlowController ) getManagedObject();
    }

    /**
     * Get the display name of the related FlowController.
     *
     * @return the display name of the {@link FlowController} associated with this
     *         exception, or <code>null</code> if there is none.
     */
    public String getFlowControllerURI()
    {
        FlowController flowController = getFlowController();
        if ( flowController == null )
        {
            return null;
        }
        return flowController.getDisplayName();
    }

    /**
     * Get the name of the action associated with this exception.
     *
     * @return a String that is the name of the action associated with this exception.
     */
    public String getActionName()
    {
        return _actionName;
    }

    /**
     * Tell whether the root cause may be session expiration in cases where the requested session ID is different than
     * the actual session ID; if <code>true</code>, then a {@link SessionExpiredException} will be thrown instead of
     * this one in these situations.
     */
    public abstract boolean causeMayBeSessionExpiration();
}
| apache-2.0 |
dtk/dtk-server | application/model/node/delete.rb | 5353 | #
# Copyright (C) 2010-2016 dtk contributors
#
# This file is part of the dtk project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module DTK; class Node
  module Delete
    module Mixin
      # This wil be called only when self is non node group (i.e., top level node or target ref)
      def destroy_and_delete(opts = {})
        if is_node_group?()
          # TODO: support this; one way is to case on whether it has any members and if not
          # allow it to be deleted; and if members indicate the syntax to delete an individual member"
          fail ErrorUsage.new('Not supported: deleting a node group; its members can be deleted')
        end

        if is_target_ref?
          destroy_and_delete__target_ref(opts)
        else
          destroy_and_delete__top_level_node(opts)
        end
      end

      # only for node group members
      def soft_delete(opts = {})
        return unless is_target_ref?
        node_group_member = NodeGroup::NodeGroupMember.create_as(self)
        node_group_member.soft_delete()
      end

      def destroy_and_reset(target_idh)
        # Fixed the user-facing typo ('Supperetd' -> 'Supported').
        fail ErrorUsage.new('Command Not Supported')
        # TODO: DTK-1857 - everything below is unreachable until the fail above is removed
        if is_node_group?() || is_target_ref?()
          fail ErrorUsage.new('destroy_and_reset_nodes not supported for service instances with node groups')
        end

        if CommandAndControl.destroy_node?(self, reset: true)
          Model.delete_instance(target_ref.id_handle) if target_ref
          StateChange.create_pending_change_item(new_item: id_handle(), parent: target_idh)
        end
        update_agent_git_commit_id(nil)
        attribute.clear_host_addresses()
      end

      def delete_object(opts = {})
        if target_ref_idh = opts[:delete_target_ref]
          Model.delete_instance(target_ref_idh)
        end
        update_dangling_links()

        if is_target_ref?()
          # This wil be a node group member; need to bump down its assocaited node groups cardinality
          node_group_member = NodeGroup::NodeGroupMember.create_as(self)
          unless opts[:dont_change_cardinality]
            node_group_member.update_object!(:ng_member_deleted)
            node_group_member.bump_down_associated_node_group_cardinality() unless node_group_member[:ng_member_deleted]
          end
        end

        if opts[:update_task_template] # TODO: think we can cleanup upstream to call option :cleanup, a better description
          assembly = opts[:assembly] || fail(Error, "If update_task_template is set, :assembly must be given as an option")
          # updates task template to remove all references to node, which includes removing subtask temporaly spliced that does actual delete
          Task::Template::ConfigComponents.cleanup_after_node_has_been_deleted?(assembly, self)
        end

        Model.delete_instance(id_handle())
        true
      end

      private

      def destroy_and_delete__target_ref(opts = {})
        succeeded = true
        if is_target_ref?(not_deletable: true)
          # no op
          return succeeded
        end

        # check the reference count on the target ref; if one (or less can delet) since this
        # is being initiated by a node group or top level node pointing to it
        # if more than 1 reference count than succeed with no op
        ref_count = TargetRef.get_reference_count(self)
        if ref_count < 2
          execute_destroy_and_delete(opts)
        else
          # no op
          true
        end
      end

      def destroy_and_delete__top_level_node(opts)
        # see if there are any target refs this points to this
        # if none then destroy and delete
        # if 1 then check reference count
        # since this is not anode group target_refs_info should not have size greater than 1
        target_refs_info = TargetRef.get_linked_target_refs_info(self)
        if target_refs_info.empty?
          execute_destroy_and_delete(opts)
        elsif target_refs_info.size == 1
          target_ref_info = target_refs_info.first
          opts_delete = opts
          target_ref = target_ref_info.target_ref
          if target_ref && target_ref_info.ref_count == 1
            # this means to delete target ref also; Hash#merge returns a new
            # hash, so capture its result (the old code discarded it)
            opts_delete = opts_delete.merge(delete_target_ref: target_ref.id_handle())
          end
          # pass the (possibly augmented) options through
          execute_destroy_and_delete(opts_delete)
        else
          Log.error("Unexpected that (#{inspect}) is linked to more than 1 target refs")
          delete_object(opts)
        end
      end

      def execute_destroy_and_delete(opts = {})
        # TODO: DTK-3010: no special node delete; remove this comment when remove commmented out below
        # suceeeded = CommandAndControl.destroy_node?(self)
        # return false unless suceeeded
        delete_object(opts)
      end
    end
  end
end; end
| apache-2.0 |
OLR-xray/XRay-NEW | XRay/xr_3da/xrGame/space_restriction_composition.cpp | 6128 | ////////////////////////////////////////////////////////////////////////////
// Module : space_restriction_composition.cpp
// Created : 17.08.2004
// Modified : 27.08.2004
// Author : Dmitriy Iassenev
// Description : Space restriction composition
////////////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "space_restriction_composition.h"
#include "space_restriction_holder.h"
#include "space_restriction_bridge.h"
#include "restriction_space.h"
#include "ai_space.h"
#include "level_graph.h"
#include "graph_engine.h"
#pragma warning(push)
#pragma warning(disable:4995)
#include <malloc.h>
#pragma warning(pop)
#ifdef DEBUG
# include "level.h"
# include "space_restrictor.h"
#endif // DEBUG
int g_restriction_checker = 0;
CSpaceRestrictionComposition::~CSpaceRestrictionComposition	()
{
	// Leak-check bookkeeping: one fewer live restriction object.
	--g_restriction_checker;
}
// Predicate used to prune the composed border: vertices that lie inside
// the merged restriction are removed by remove_if in initialize().
struct CMergePredicate {
	CSpaceRestrictionComposition *m_restriction;

	// Binds the predicate to the composition being initialized.
	IC			CMergePredicate		(CSpaceRestrictionComposition *restriction)
	{
		m_restriction	= restriction;
	}

	// True when the level vertex is inside the composition (border excluded).
	IC	bool	operator()			(u32 level_vertex_id) const
	{
		return		(m_restriction->inside(level_vertex_id,false));
	}
};
// Adds a sub-restriction to the composition and splices its border
// vertices into the composed border (duplicates/interior vertices are
// pruned later, in initialize()).
IC	void CSpaceRestrictionComposition::merge	(CBaseRestrictionPtr restriction)
{
	m_restrictions.push_back	(restriction);
	m_border.insert		(m_border.begin(),restriction->border().begin(),restriction->border().end());
}
bool CSpaceRestrictionComposition::inside		(const Fsphere &sphere)
{
	// Lazily initialize; while initialization is not possible yet (some
	// sub-restriction not ready), conservatively report "inside".
	if (!initialized()) {
		initialize	();
		if (!initialized())
			return	(true);
	}

	// Quick reject against the bounding sphere of the whole composition.
	if (!m_sphere.intersect(sphere))
		return		(false);

	// Inside the composition iff inside any merged sub-restriction.
	RESTRICTIONS::iterator	I = m_restrictions.begin();
	RESTRICTIONS::iterator	E = m_restrictions.end();
	for ( ; I != E; ++I)
		if ((*I)->inside(sphere))
			return	(true);

	return			(false);
}
void CSpaceRestrictionComposition::initialize	()
{
	u32			n = _GetItemCount(*m_space_restrictors);
	VERIFY		(n);

	// Single restrictor: nothing to compose.
	if (n == 1) {
#ifdef DEBUG
		m_correct	= true;
		check_restrictor_type	();
#endif
		return;
	}

	// Defer initialization until every named sub-restriction is ready.
	string256	element;
	for (u32 i=0; i<n ;++i)
		if (!m_space_restriction_holder->restriction(_GetItem(*m_space_restrictors,i,element))->initialized())
			return;

	// Merge all sub-restrictions, collecting their bounding spheres.
	Fsphere		*spheres = (Fsphere*)_alloca(n*sizeof(Fsphere));
	for (u32 i=0; i<n ;++i) {
		SpaceRestrictionHolder::CBaseRestrictionPtr	restriction =
			m_space_restriction_holder->restriction(
				_GetItem(
					*m_space_restrictors,
					i,
					element
				)
			);
		merge		(restriction);
		spheres[i]	= restriction->sphere();
	}

	// computing almost minimum sphere which covers all the almost minimum spheres
	// First take the AABB of all spheres, center the result there, then
	// grow the radius to cover every sphere (plus an epsilon).
	Fbox3		temp;
	temp.min.x = spheres[0].P.x - spheres[0].R;
	temp.min.y = spheres[0].P.y - spheres[0].R;
	temp.min.z = spheres[0].P.z - spheres[0].R;
	temp.max.x = spheres[0].P.x + spheres[0].R;
	temp.max.y = spheres[0].P.y + spheres[0].R;
	temp.max.z = spheres[0].P.z + spheres[0].R;
	for (u32 i=1; i<n; ++i) {
		temp.min.x = _min(temp.min.x,spheres[i].P.x - spheres[i].R);
		temp.min.y = _min(temp.min.y,spheres[i].P.y - spheres[i].R);
		temp.min.z = _min(temp.min.z,spheres[i].P.z - spheres[i].R);
		temp.max.x = _max(temp.max.x,spheres[i].P.x + spheres[i].R);
		temp.max.y = _max(temp.max.y,spheres[i].P.y + spheres[i].R);
		temp.max.z = _max(temp.max.z,spheres[i].P.z + spheres[i].R);
	}

	m_sphere.P.mad	(temp.min,temp.max,.5f);
	m_sphere.R		= m_sphere.P.distance_to(spheres[0].P) + spheres[0].R;
	for (u32 i=1; i<n ;++i)
		m_sphere.R	= _max(m_sphere.R,m_sphere.P.distance_to(spheres[i].P) + spheres[i].R);
	m_sphere.R		+= EPS_L;

	m_initialized	= true;

	// Drop border vertices that ended up strictly inside the merged shape.
	xr_vector<u32>::iterator	I = remove_if(m_border.begin(),m_border.end(),CMergePredicate(this));
	m_border.erase	(I,m_border.end());

	process_borders	();

#ifdef DEBUG
	test_correctness	();
#endif
}
#ifdef DEBUG
// Debug validation of the composed restriction: collects the border nodes of
// all component restrictions and flood-fills the level graph from each
// component's border to check the merged border remains consistent.
// Sets m_correct accordingly.
void CSpaceRestrictionComposition::test_correctness()
{
    m_correct = true;
    m_test_storage.clear ();
    {
        // Gather the border nodes of every component restriction.
        RESTRICTIONS::iterator I = m_restrictions.begin();
        RESTRICTIONS::iterator E = m_restrictions.end();
        for ( ; I != E; ++I)
            m_test_storage.insert (m_test_storage.end(),(*I)->object().m_test_storage.begin(),(*I)->object().m_test_storage.end());
    }
    {
        // Sort + unique to deduplicate node ids shared between components.
        std::sort (m_test_storage.begin(),m_test_storage.end());
        xr_vector<u32>::iterator I = unique(m_test_storage.begin(),m_test_storage.end());
        m_test_storage.erase (I,m_test_storage.end());
    }
    if (m_test_storage.empty()) {
        m_correct = false;
        return;
    }
    xr_vector<u32> nodes;
    {
        RESTRICTIONS::iterator I = m_restrictions.begin();
        RESTRICTIONS::iterator E = m_restrictions.end();
        for ( ; I != E; ++I) {
            VERIFY3 (!(*I)->object().m_test_storage.empty(),"Restrictor has no border",*(*I)->object().name());
            nodes.clear ();
            // Flood-fill from one border node of this component with the
            // composite border masked out of the level graph.
            ai().level_graph().set_mask (border());
            ai().graph_engine().search (ai().level_graph(), (*I)->object().m_test_storage.back(), (*I)->object().m_test_storage.back(), &nodes, GraphEngineSpace::CFlooder());
            ai().level_graph().clear_mask (border());
            // NOTE(review): 65535 appears to be a flooder node cap; a capped
            // search is treated as correct (connectivity cannot be disproved)
            // -- confirm against CFlooder's limit.
            if (nodes.size() == 65535)
                m_correct = true;
            else
                m_correct = (m_test_storage.size() <= nodes.size());
            if (!m_correct)
                break;
        }
    }
}
#endif
// Accessor for the composite's bounding sphere; NODEFAULT marks this path as
// not-to-be-reached (callers are expected to query components instead), so a
// real return value only exists in DEBUG builds.
Fsphere CSpaceRestrictionComposition::sphere () const
{
    NODEFAULT;
#ifdef DEBUG
    return (m_sphere);
#endif
}
#ifdef DEBUG
// Debug sanity check on the restrictor object named by m_space_restrictors.
// NOTE(review): initialize() only calls this when the item count is 1, yet
// the first guard returns immediately when the count is 1, making the rest of
// the body unreachable from that call site -- confirm whether the comparison
// was meant to be `!= 1`.
void CSpaceRestrictionComposition::check_restrictor_type()
{
    if (_GetItemCount(*m_space_restrictors) == 1)
        return;
    if (!ai().get_alife())
        return;
    CObject *object = Level().Objects.FindObjectByName(m_space_restrictors);
    if (!object)
        return;
    CSpaceRestrictor *restrictor = smart_cast<CSpaceRestrictor*>(object);
    VERIFY3 (restrictor,"you are trying to use object as a restrictor",*m_space_restrictors);
    // NOTE(review): the two VERIFY2 conditions below are mutually exclusive
    // (== and != on the same expression), so if this point is ever reached one
    // of them must fire; one of the checks or its message looks wrong.
    VERIFY2 (restrictor->restrictor_type() == RestrictionSpace::eRestrictorTypeNone,"you are trying to restrict yourself with restrictor with type eRestrictorTypeNone");
    VERIFY2 (restrictor->restrictor_type() != RestrictionSpace::eRestrictorTypeNone,"impossible situation: wrong net_Spawn branch used");
}
#endif // DEBUG | apache-2.0 |
Windows-Applications-Captain-Cold/Mobile-Food-Ordering | EatFast-Server/app/dbRequester.js | 1044 | var User = require('./models/User.js');
module.exports = function () {
function getUserById(id) {
return new Promise(function (resolve, reject) {
User.findOne({_id: id}, function (error, user) {
if (error) {
reject(error);
}
if (!user) {
console.error("No location found whit id: " + id + " !...");
}
resolve(user);
});
});
}
function getUserByParams(params) {
return new Promise(function (resolve, reject) {
User.findOne(params, function (error, user) {
if (error) {
reject(error);
}
if (!user) {
console.error("No location found whit id: " + id + " !...");
}
resolve(user);
});
});
}
return {
users: {
getById: getUserById,
get: getUserByParams
}
}
};
| apache-2.0 |
xArthasx/dotfiles | .config/coc/extensions/node_modules/coc-scssmodules/node_modules/coc.nvim/lib/language-client/workspaceFolders.js | 5471 | /* --------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
* ------------------------------------------------------------------------------------------ */
'use strict';
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
const vscode_languageserver_protocol_1 = require("vscode-languageserver-protocol");
const workspace_1 = tslib_1.__importDefault(require("../workspace"));
const UUID = tslib_1.__importStar(require("./utils/uuid"));
const logger = require('../util/logger')('language-client-workspaceFolder');
// Safe property read: yields undefined when the target itself is undefined,
// otherwise the value stored under the given key.
function access(target, key) {
    return target === undefined ? undefined : target[key];
}
// Set difference: the elements of `left` that do not occur in `right`.
// indexOf (rather than includes) is used deliberately to preserve the
// original SameValueZero-free comparison semantics.
function arrayDiff(left, right) {
    return left.filter(function (element) {
        return right.indexOf(element) === -1;
    });
}
/**
 * Language-client feature implementing the LSP `workspace/workspaceFolders`
 * capability: answers the server's folder requests and forwards folder
 * change notifications, routing everything through configured middleware.
 */
class WorkspaceFoldersFeature {
    constructor(_client) {
        this._client = _client;
        // registration id -> disposable for the change-event listener
        this._listeners = new Map();
    }
    get messages() {
        return vscode_languageserver_protocol_1.DidChangeWorkspaceFoldersNotification.type;
    }
    // Converts a workspace folder to its LSP wire shape (uri + name).
    asProtocol(workspaceFolder) {
        if (workspaceFolder === void 0) {
            return null;
        }
        return { uri: workspaceFolder.uri, name: workspaceFolder.name };
    }
    fillInitializeParams(params) {
        const folders = workspace_1.default.workspaceFolders;
        // Remember the folders sent at initialize time so sendInitialEvent()
        // can later diff against the then-current set.
        this._initialFolders = folders;
        if (folders === void 0) {
            params.workspaceFolders = null;
        }
        else {
            params.workspaceFolders = folders.map(folder => this.asProtocol(folder));
        }
        // NOTE(review): this overwrites the asProtocol() mapping (and the null
        // case) above with the raw folder objects -- confirm whether
        // workspace.workspaceFolders is already protocol-shaped; otherwise the
        // preceding branch is dead code.
        params.workspaceFolders = workspace_1.default.workspaceFolders;
    }
    fillClientCapabilities(capabilities) {
        capabilities.workspace = capabilities.workspace || {};
        capabilities.workspace.workspaceFolders = true;
    }
    initialize(capabilities) {
        let client = this._client;
        // Answer the server's workspace/workspaceFolders requests, letting
        // middleware intercept if configured.
        client.onRequest(vscode_languageserver_protocol_1.WorkspaceFoldersRequest.type, (token) => {
            let workspaceFolders = () => {
                let folders = workspace_1.default.workspaceFolders;
                if (folders === void 0) {
                    return null;
                }
                let result = folders.map(folder => {
                    return this.asProtocol(folder);
                });
                return result;
            };
            let middleware = client.clientOptions.middleware.workspace;
            return middleware && middleware.workspaceFolders
                ? middleware.workspaceFolders(token, workspaceFolders)
                : workspaceFolders(token);
        });
        // The server may ask for change notifications either with a fixed
        // registration id (string) or dynamically (true -> generate one).
        let value = access(access(access(capabilities, 'workspace'), 'workspaceFolders'), 'changeNotifications');
        let id;
        if (typeof value === 'string') {
            id = value;
        }
        else if (value === true) {
            id = UUID.generateUuid();
        }
        if (id) {
            this.register(this.messages, {
                id,
                registerOptions: undefined
            });
        }
    }
    // Sends a didChangeWorkspaceFolders notification for the given delta.
    doSendEvent(addedFolders, removedFolders) {
        let params = {
            event: {
                added: addedFolders.map(folder => this.asProtocol(folder)),
                removed: removedFolders.map(folder => this.asProtocol(folder))
            }
        };
        this._client.sendNotification(vscode_languageserver_protocol_1.DidChangeWorkspaceFoldersNotification.type, params);
    }
    // Reconciles the folders reported at initialize time with the current
    // set, emitting one delta notification if they differ.
    sendInitialEvent(currentWorkspaceFolders) {
        if (this._initialFolders && currentWorkspaceFolders) {
            const removed = arrayDiff(this._initialFolders, currentWorkspaceFolders);
            const added = arrayDiff(currentWorkspaceFolders, this._initialFolders);
            if (added.length > 0 || removed.length > 0) {
                this.doSendEvent(added, removed);
            }
        }
        else if (this._initialFolders) {
            this.doSendEvent([], this._initialFolders);
        }
        else if (currentWorkspaceFolders) {
            this.doSendEvent(currentWorkspaceFolders, []);
        }
    }
    // Starts listening for folder changes under the given registration id.
    register(_message, data) {
        let id = data.id;
        let client = this._client;
        let disposable = workspace_1.default.onDidChangeWorkspaceFolders(event => {
            let didChangeWorkspaceFolders = (event) => {
                this.doSendEvent(event.added, event.removed);
            };
            let middleware = client.clientOptions.middleware.workspace;
            middleware && middleware.didChangeWorkspaceFolders
                ? middleware.didChangeWorkspaceFolders(event, didChangeWorkspaceFolders)
                : didChangeWorkspaceFolders(event);
        });
        this._listeners.set(id, disposable);
        this.sendInitialEvent(workspace_1.default.workspaceFolders);
    }
    unregister(id) {
        let disposable = this._listeners.get(id);
        if (disposable === void 0) {
            return;
        }
        this._listeners.delete(id);
        disposable.dispose();
    }
    dispose() {
        for (let disposable of this._listeners.values()) {
            disposable.dispose();
        }
        this._listeners.clear();
    }
}
exports.WorkspaceFoldersFeature = WorkspaceFoldersFeature;
//# sourceMappingURL=workspaceFolders.js.map | apache-2.0 |
petarfitzpatrick/Project2 | client/login/client.js | 3369 | const handleLogin = (e) => {
e.preventDefault();
$("#postMessage").animate({width:'hide'}, 350);
if($("#user").val() == '' || $("#pass").val() == '') {
handleError("RAWR! Username or password is empty");
return false;
}
console.log($("input[name=_csrf]").val());
sendAjax('POST', $("#loginForm").attr("action"), $("#loginForm").serialize(), redirect);
return false;
};
// Validates and submits the signup form via AJAX.
// Requires every field to be filled and both password entries to match;
// on success the server response drives a redirect.
const handleSignup = (e) => {
    e.preventDefault();
    $("#postMessage").animate({width:'hide'}, 350);
    if($("#user").val() == '' || $("#pass").val() == '' || $("#pass2").val() == '') {
        handleError("RAWR! All fields are required");
        return false;
    }
    if($("#pass").val() !== $("#pass2").val()) {
        handleError("RAWR! Passwords do not match");
        return false;
    }
    sendAjax('POST', $("#signupForm").attr("action"), $("#signupForm").serialize(), redirect);
    // Returning false suppresses the browser's native form submission.
    return false;
};
// Render method for the login form component (bound via React.createClass).
// The hidden _csrf input carries the server-issued CSRF token from props.
const renderLogin = function() {
    return (
        <form id="loginForm" name="loginForm"
            onSubmit={this.handleSubmit}
            action="/login"
            method="POST"
            className="mainForm"
        >
            <label htmlFor="username">Username: </label>
            <input id="user" type="text" name="username" placeholder="username"/>
            <label htmlFor="pass">Password: </label>
            <input id="pass" type="password" name="pass" placeholder="password"/>
            <input type="hidden" name="_csrf" value={this.props.csrf}/>
            <input className="formSubmit" type="submit" value="Sign in" />
        </form>
    );
};

// Render method for the signup form component; mirrors renderLogin but adds
// a password-confirmation field.
const renderSignup = function() {
    return (
        <form id="signupForm"
            name="signupForm"
            onSubmit={this.handleSubmit}
            action="/signup"
            method="POST"
            className="mainForm"
        >
            <label htmlFor="username">Username: </label>
            <input id="user" type="text" name="username" placeholder="username"/>
            <label htmlFor="pass">Password: </label>
            <input id="pass" type="password" name="pass" placeholder="password"/>
            <label htmlFor="pass2">Password: </label>
            <input id="pass2" type="password" name="pass2" placeholder="retype password"/>
            <input type="hidden" name="_csrf" value={this.props.csrf} />
            <input className="formSubmit" type="submit" value="Sign up" />
        </form>
    );
};
// Builds the login form component and mounts it into #content,
// threading the CSRF token through as a prop.
const createLoginWindow = function (csrf) {
    const LoginWindow = React.createClass({
        handleSubmit: handleLogin,
        render: renderLogin
    });
    ReactDOM.render(
        <LoginWindow csrf={csrf} />,
        document.querySelector("#content")
    );
};

// Builds the signup form component and mounts it into #content.
const createSignupWindow = function (csrf) {
    const SignupWindow = React.createClass({
        handleSubmit: handleSignup,
        render: renderSignup
    });
    ReactDOM.render(
        <SignupWindow csrf={csrf} />,
        document.querySelector("#content")
    );
};
// Wires the login/signup toggle buttons to swap the mounted form and shows
// the login view by default.
const setup = function(csrf) {
    const loginButton = document.querySelector("#loginButton");
    const signupButton = document.querySelector("#signupButton");
    signupButton.addEventListener("click", (e) => {
        e.preventDefault();
        createSignupWindow(csrf);
        return false;
    });
    loginButton.addEventListener("click", (e) => {
        e.preventDefault();
        createLoginWindow(csrf);
        return false;
    });
    createLoginWindow(csrf); //default view
}

// Fetches a fresh CSRF token from the server, then initializes the UI with it.
const getToken = () => {
    sendAjax('GET', '/getToken', null, (result) => {
        setup(result.csrfToken);
    });
}

// Entry point: bootstrap once the DOM is ready.
$(document).ready(function() {
    getToken();
});
TSavo/GoEvolve | population.go | 3814 | package goevolve
import (
"bytes"
"crypto/sha256"
"encoding/gob"
"fmt"
"github.com/tsavo/GoVirtual"
"io"
"log"
"os"
"time"
)
// Population manages one evolving pool of candidate programs; breeding,
// evaluation, selection and reporting all happen inside Run().
type Population struct {
	Id, RegisterLength   int
	InstructionSet       *govirtual.InstructionSet
	Breeder              *Breeder
	Evaluator            *Evaluator
	Selector             *Selector
	TerminationCondition *govirtual.TerminationCondition
	ControlChan          chan bool               // send to stop Run()
	PopulationReportChan chan *PopulationReport  // receives per-generation reports (non-blocking sends)
	Heap                 *govirtual.Memory       // memory shared by all processors
}
// SolutionCache maps sha256(program text) -> best known solution for that
// program; it is shared by all populations and periodically persisted.
var SolutionCache map[string]*Solution

// init loads any previously persisted solution cache and starts a background
// goroutine that flushes the cache to disk once a minute.
func init() {
	SolutionCache = make(map[string]*Solution)
	go func() {
		for {
			time.Sleep(60 * time.Second)
			WriteSolutionCache(EncodeSolutionCache())
		}
	}()
	// NOTE(review): this deferred recover only guards the remainder of init
	// itself (the cache load below), not the goroutine above; e.g. a nil
	// result from DecodeSolutionCache would panic on the dereference below
	// and be silently swallowed here, leaving the empty cache in place.
	defer recover()
	cache := ReadSolutionCache()
	if cache != nil {
		SolutionCache = *DecodeSolutionCache(cache)
	}
}
// EncodeSolutionCache gob-encodes the global SolutionCache into a fresh
// buffer. It panics if encoding fails (the map and Solution type are always
// encodable, so failure indicates a programming error).
func EncodeSolutionCache() (b *bytes.Buffer) {
	b = new(bytes.Buffer)
	e := gob.NewEncoder(b)
	// Encoding the map
	err := e.Encode(&SolutionCache)
	if err != nil {
		panic(err)
	}
	return
}
// DecodeSolutionCache deserializes a solution cache previously produced by
// EncodeSolutionCache. It returns nil when the buffer does not contain a
// valid gob-encoded cache.
func DecodeSolutionCache(b *bytes.Buffer) *map[string]*Solution {
	solutions := make(map[string]*Solution)
	if err := gob.NewDecoder(b).Decode(&solutions); err != nil {
		return nil
	}
	return &solutions
}
func WriteSolutionCache(b *bytes.Buffer) {
f, _ := os.OpenFile("SolutionCache.gob", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777)
f.Write(b.Bytes()) // Error handling elided for brevity.
f.Close()
}
func ReadSolutionCache() *bytes.Buffer {
buf := bytes.NewBuffer(nil)
f, _ := os.Open("SolutionCache.gob") // Error handling elided for brevity.
written, err := io.Copy(buf, f) // Error handling elided for brevity.
f.Close()
if err == nil && written > 0 {
return buf
}
return nil
}
// Solution pairs a candidate program with the reward it earned.
type Solution struct {
	Reward  int
	Program string
}

// SolutionList is a collection of solutions, sortable by descending reward.
type SolutionList []*Solution

// GetPrograms returns the program text of every solution, preserving order.
func (sol *SolutionList) GetPrograms() []string {
	programs := make([]string, 0, len(*sol))
	for _, solution := range *sol {
		programs = append(programs, solution.Program)
	}
	return programs
}

// PopulationReport couples a population id with its evaluated solutions.
type PopulationReport struct {
	Id int
	SolutionList
}

// sort.Interface implementation ordering solutions from highest to lowest
// reward.
func (s SolutionList) Len() int           { return len(s) }
func (s SolutionList) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s SolutionList) Less(i, j int) bool { return s[i].Reward > s[j].Reward }
// NewPopulation constructs a Population wired to the given shared memory,
// instruction set, termination condition, breeder, evaluator and selector.
// Both channels are buffered with capacity 1 so control and report sends do
// not block the sender.
func NewPopulation(id int, sharedMemory *govirtual.Memory, rl int, is *govirtual.InstructionSet, term govirtual.TerminationCondition, gen Breeder, eval Evaluator, selector Selector) *Population {
	return &Population{id, rl, is, &gen, &eval, &selector, &term, make(chan bool, 1), make(chan *PopulationReport, 1), sharedMemory}
}
// Run executes the breed/evaluate/select loop forever (until a value is
// received on ControlChan). Each generation: grow or shrink the processor
// pool to match the program count, evaluate every program (reusing cached
// rewards keyed by sha256 of the program text), publish a non-blocking
// report, then breed the next generation from the selected survivors.
func (s *Population) Run() {
	programs := (*s.Breeder).Breed((*s.Breeder).Breed(nil))
	processors := make([]*govirtual.Processor, 0)
	for {
		solutions := make(SolutionList, len(programs))
		// Resize the processor pool to exactly one processor per program.
		for len(processors) < len(solutions) {
			c := govirtual.NewProcessor(s.Id, s.RegisterLength, s.InstructionSet, s.Heap, s.TerminationCondition)
			processors = append(processors, c)
		}
		if len(processors) > len(solutions) {
			processors = processors[:len(solutions)]
		}
		for x, pro := range processors {
			// Abort promptly if a stop was requested.
			select {
			case <-s.ControlChan:
				return
			default:
			}
			log.Printf("#%d: %d\n", s.Id, x)
			sha := fmt.Sprintf("%x", sha256.Sum256([]byte(programs[x])))
			sol, notNeeded := SolutionCache[sha]
			if notNeeded {
				// Cache hit: reuse the previously computed reward.
				solutions[x] = sol
			} else {
				pro.Reset()
				pro.CompileAndLoad(programs[x])
				pro.Run()
				solutions[x] = &Solution{(*s.Evaluator).Evaluate(pro), programs[x]}
				// NOTE(review): this replaces the cached entry when the
				// cached reward is GREATER than the new one, i.e. it keeps
				// the lower-reward result -- confirm whether `<` was
				// intended (SolutionList sorts higher rewards first).
				potential, present := SolutionCache[sha]
				if !present || potential.Reward > solutions[x].Reward {
					SolutionCache[sha] = solutions[x]
				}
			}
		}
		// Non-blocking publish: drop the report if nobody is listening.
		select {
		case s.PopulationReportChan <- &PopulationReport{s.Id, solutions}:
		default:
		}
		programs = (*s.Breeder).Breed((*s.Selector).Select(&solutions).GetPrograms())
	}
}
dselsam/lean | src/library/scoped_ext.cpp | 6957 | /*
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
*/
#include <vector>
#include <memory>
#include <string>
#include "util/sstream.h"
#include "library/scoped_ext.h"
namespace lean {
// A scoped extension is a (push, pop) callback pair invoked whenever a
// namespace/section scope is entered or left.
typedef std::tuple<push_scope_fn, pop_scope_fn> entry;
typedef std::vector<entry> scoped_exts;
// Global registry of all scoped extensions (allocated in
// initialize_scoped_ext, released in finalize_scoped_ext).
static scoped_exts * g_exts = nullptr;
static scoped_exts & get_exts() { return *g_exts; }

// Registers a new scoped extension's push/pop callbacks.
void register_scoped_ext(push_scope_fn push, pop_scope_fn pop) {
    get_exts().emplace_back(push, pop);
}
// Environment extension carrying all namespace/section scope state.
struct scope_mng_ext : public environment_extension {
    name_set m_namespace_set; // all namespaces registered in the system
    name_set m_opened_namespaces; // set of namespaces marked as "open"
    list<name> m_namespaces; // stack of namespaces/sections
    list<name> m_headers; // namespace/section header
    list<scope_kind> m_scope_kinds; // kind (Namespace vs Section) per stack entry
};

// Registers the extension slot at static-initialization time.
struct scope_mng_ext_reg {
    unsigned m_ext_id;
    scope_mng_ext_reg() { m_ext_id = environment::register_extension(std::make_shared<scope_mng_ext>()); }
};
static scope_mng_ext_reg * g_ext = nullptr;
// Read-only accessor for the scope extension stored in the environment.
static scope_mng_ext const & get_extension(environment const & env) {
    return static_cast<scope_mng_ext const &>(env.get_extension(g_ext->m_ext_id));
}
// Returns a new environment whose scope extension is replaced by a copy of ext.
static environment update(environment const & env, scope_mng_ext const & ext) {
    return env.update(g_ext->m_ext_id, std::make_shared<scope_mng_ext>(ext));
}
// Returns the innermost open namespace, or the anonymous name when none is open.
name const & get_namespace(environment const & env) {
    scope_mng_ext const & ext = get_extension(env);
    return !is_nil(ext.m_namespaces) ? head(ext.m_namespaces) : name::anonymous();
}

// Returns the header (name after `namespace`/`section`) of the innermost
// open scope, or the anonymous name when none is open.
name const & get_scope_header(environment const & env) {
    scope_mng_ext const & ext = get_extension(env);
    return !is_nil(ext.m_namespaces) ? head(ext.m_headers) : name::anonymous();
}

// Returns the full stack of open namespaces (innermost first).
list<name> const & get_namespaces(environment const & env) {
    return get_extension(env).m_namespaces;
}

// True when the innermost open scope is a section (not a namespace).
bool in_section(environment const & env) {
    scope_mng_ext const & ext = get_extension(env);
    return !is_nil(ext.m_scope_kinds) && head(ext.m_scope_kinds) == scope_kind::Section;
}
// Records namespace n as "open" in the environment's extension state.
environment mark_namespace_as_open(environment const & env, name const & n) {
    scope_mng_ext ext = get_extension(env);
    ext.m_opened_namespaces.insert(n);
    return update(env, ext);
}

// Returns the set of namespaces previously marked as open.
name_set get_opened_namespaces(environment const & env) {
    return get_extension(env).m_opened_namespaces;
}

// True when n is a registered namespace.
bool is_namespace(environment const & env, name const & n) {
    return get_extension(env).m_namespace_set.contains(n);
}

// Resolves n to a registered namespace: first as given, then prefixed by each
// currently open namespace (innermost first). Returns none when no candidate
// is a registered namespace.
optional<name> to_valid_namespace_name(environment const & env, name const & n) {
    scope_mng_ext const & ext = get_extension(env);
    if (ext.m_namespace_set.contains(n))
        return optional<name>(n);
    for (auto const & ns : ext.m_namespaces) {
        name r = ns + n;
        if (ext.m_namespace_set.contains(r))
            return optional<name>(r);
    }
    return optional<name>();
}
// Returns every registered namespace, plus each namespace re-spelled relative
// to the currently open namespaces (with the open prefix stripped), for use
// as auto-completion candidates.
std::vector<name> get_namespace_completion_candidates(environment const & env) {
    std::vector<name> ret;
    scope_mng_ext const & ext = get_extension(env);
    ext.m_namespace_set.for_each([&](name const & ns) {
        ret.push_back(ns);
        for (auto const & open_ns : ext.m_namespaces)
            if (open_ns != ns && is_prefix_of(open_ns, ns))
                ret.push_back(ns.replace_prefix(open_ns, {}));
    });
    return ret;
}
// Module modification recording the creation of a namespace, so that
// importing the module replays the namespace registration.
struct new_namespace_modification : public modification {
    LEAN_MODIFICATION("nspace")

    name m_ns;

    new_namespace_modification(name const & ns) : m_ns(ns) {}
    new_namespace_modification() {}

    // Re-registers the recorded namespace when the module is loaded.
    void perform(environment & env) const override {
        scope_mng_ext ext = get_extension(env);
        ext.m_namespace_set.insert(m_ns);
        env = update(env, ext);
    }

    void serialize(serializer & s) const override {
        s << m_ns;
    }

    void textualize(tlean_exporter & x) const override {
        unsigned n = x.export_name(m_ns);
        x.out() << "#NEW_NAMESPACE " << n << std::endl;;
    }

    static std::shared_ptr<modification const> deserialize(deserializer & d) {
        name n;
        d >> n;
        return std::make_shared<new_namespace_modification>(n);
    }
};
// Registers ns (and, recursively, every prefix of ns) as a known namespace,
// recording a module modification for each name not seen before. Returns the
// environment unchanged when ns is already registered.
environment add_namespace(environment const & env, name const & ns) {
    scope_mng_ext ext = get_extension(env);
    if (!ext.m_namespace_set.contains(ns)) {
        ext.m_namespace_set.insert(ns);
        environment r = update(env, ext);
        r = module::add(r, std::make_shared<new_namespace_modification>(ns));
        if (ns.is_atomic())
            return r;
        else
            return add_namespace(r, ns.get_prefix());
    } else {
        return env;
    }
}
// Opens a new namespace/section scope named n. For namespaces the new full
// name is the current namespace extended with n; sections keep the current
// namespace. Pushes the scope onto the stacks, notifies every registered
// scoped extension, and records a module modification if the namespace is new.
environment push_scope(environment const & env, io_state const & ios, scope_kind k, name const & n) {
    name new_n = get_namespace(env);
    if (k == scope_kind::Namespace)
        new_n = new_n + n;
    scope_mng_ext ext = get_extension(env);
    bool save_ns = false;
    if (!ext.m_namespace_set.contains(new_n)) {
        save_ns = true;
        ext.m_namespace_set.insert(new_n);
    }
    ext.m_namespaces = cons(new_n, ext.m_namespaces);
    ext.m_headers = cons(n, ext.m_headers);
    ext.m_scope_kinds = cons(k, ext.m_scope_kinds);
    environment r = update(env, ext);
    // Fire every registered extension's push callback.
    for (auto const & t : get_exts()) {
        r = std::get<0>(t)(r, ios, k);
    }
    if (save_ns)
        r = module::add(r, std::make_shared<new_namespace_modification>(new_n));
    return r;
}
// Closes the innermost open scope without any begin/end name validation.
// Pops the scope stacks and fires every registered extension's pop callback.
// Returns env unchanged when no scope is open.
environment pop_scope_core(environment const & env, io_state const & ios) {
    scope_mng_ext ext = get_extension(env);
    if (is_nil(ext.m_namespaces))
        return env;
    scope_kind k = head(ext.m_scope_kinds);
    ext.m_namespaces = tail(ext.m_namespaces);
    ext.m_headers = tail(ext.m_headers);
    ext.m_scope_kinds = tail(ext.m_scope_kinds);
    environment r = update(env, ext);
    for (auto const & t : get_exts()) {
        r = std::get<1>(t)(r, ios, k);
    }
    return r;
}
// Closes the innermost open scope, verifying that the given name matches the
// scope's header (begin/end matching). Throws when no scope is open or the
// names disagree.
environment pop_scope(environment const & env, io_state const & ios, name const & n) {
    // Read-only access: take a const reference instead of deep-copying the
    // extension (which holds name sets and lists), matching get_namespace etc.
    scope_mng_ext const & ext = get_extension(env);
    if (is_nil(ext.m_namespaces))
        throw exception("invalid end of scope, there are no open namespaces/sections");
    if (n != head(ext.m_headers))
        throw exception(sstream() << "invalid end of scope, begin/end mismatch, scope starts with '"
                        << head(ext.m_headers) << "', and ends with '" << n << "'");
    return pop_scope_core(env, ios);
}
// Returns true when at least one namespace/section scope is currently open.
bool has_open_scopes(environment const & env) {
    // Read-only access: take a const reference instead of deep-copying the
    // extension (which holds name sets and lists), matching get_namespace etc.
    scope_mng_ext const & ext = get_extension(env);
    return !is_nil(ext.m_namespaces);
}
// Allocates the global scoped-extension registry and registers the
// namespace-modification deserializer; called once at startup.
void initialize_scoped_ext() {
    g_exts = new scoped_exts();
    g_ext = new scope_mng_ext_reg();
    new_namespace_modification::init();
}

// Releases everything allocated by initialize_scoped_ext; called at shutdown.
void finalize_scoped_ext() {
    new_namespace_modification::finalize();
    delete g_exts;
    delete g_ext;
}
}
| apache-2.0 |
dyu/bookmarks | bookmarks-cli/src/main/java/bookmarks/cli/Main.java | 1908 | // TODO copyright header
package bookmarks.cli;
import io.airlift.command.Cli.CliBuilder;
import io.airlift.command.ParseException;
import com.dyuproject.protostuffdb.EntityMetadataRegistry;
import com.dyuproject.protostuffdb.TagMetadata;
import com.dyuproject.protostuffdb.CliUtil;
import com.dyuproject.protostuffdb.DSTool;
/**
* The main class.
*/
/**
 * CLI entry point: registers each module's entity metadata with the DSTool
 * command builder and executes the commands parsed from the arguments.
 */
public final class Main
{
    private Main() {}

    /**
     * Known application modules. Each value carries its entity registry and
     * may contribute extra CLI commands via {@link #configure}.
     */
    public enum Modules implements TagMetadata
    {
        USER(bookmarks.user.EntityRegistry.REGISTRY)
        {
            @Override
            public boolean isUserManaged(int tag)
            {
                return false;
            }
            @Override
            public String getName(int tag)
            {
                return null;
            }
            @Override
            void configure(CliBuilder<Runnable> builder)
            {
                // you can add custom commands to append to the builder
            }
        }
        // add another enum value for your other modules
        ;
        /** Hook for a module to append custom commands to the CLI builder. */
        abstract void configure(CliBuilder<Runnable> builder);

        public final EntityMetadataRegistry registry;
        private Modules(EntityMetadataRegistry registry)
        {
            this.registry = registry;
        }
    }

    public static void main(String[] args)
    {
        final CliBuilder<Runnable> builder = DSTool.newBuilder();
        // Register every module (tool name = lowercased enum name) and let it
        // contribute its own commands.
        for(Modules m : Modules.values())
        {
            DSTool.register(m.name().toLowerCase(), m.registry, m);
            m.configure(builder);
        }
        try
        {
            for(Runnable r : CliUtil.getRunnables(args, builder.build()))
                r.run();
        }
        catch(IllegalArgumentException e)
        {
            // Bad argument values: report and exit normally.
            System.err.println(e.getMessage());
        }
        catch (ParseException e)
        {
            // Malformed command line: report and exit normally.
            System.err.println(e.getMessage());
        }
    }
}
| apache-2.0 |
kenwenzel/leveldb | leveldb/src/main/java/org/iq80/leveldb/util/fpc/FpcTest.java | 1374 | /*
* Copyright (C) 2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.iq80.leveldb.util.fpc;
import java.nio.ByteBuffer;
/**
 * Smoke test for {@code FpcCompressor}: compresses a repeating pattern of
 * doubles, prints the achieved bytes-per-value ratio, then decompresses and
 * verifies the round trip reproduces the original values exactly.
 */
public class FpcTest {
    public static void main(String... args) {
        FpcCompressor comp = new FpcCompressor(1 << 16);
        double[] template = { 0.0, 0.0123, 0.0532324, 0.02, 0.03344 };
        double[] values = new double[31];
        for (int idx = 0; idx < values.length; idx++) {
            values[idx] = template[idx % template.length];
        }
        // Worst case the output is as large as the input (8 bytes per double).
        ByteBuffer bb = ByteBuffer.allocate(values.length * 8);
        comp.compress(bb, values);
        System.out.println(bb.position() / (double) values.length);
        bb.flip();
        double[] values2 = new double[values.length];
        comp.decompress(bb, values2);
        // Previously the decompressed output was never checked, so the test
        // could not fail on corruption; verify the round trip element-wise.
        for (int idx = 0; idx < values.length; idx++) {
            if (values[idx] != values2[idx]) {
                throw new AssertionError("round-trip mismatch at index " + idx
                        + ": expected " + values[idx] + " but got " + values2[idx]);
            }
        }
    }
}
| apache-2.0 |
T-Systems-MMS/perfsig-jenkins | dynatrace/src/main/java/de/tsystems/mms/apm/performancesignature/dynatracesaas/rest/model/TechnologyInfo.java | 2757 | /*
* Copyright (c) 2014-2018 T-Systems Multimedia Solutions GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Dynatrace Environment API
* Documentation of the Dynatrace REST API. Refer to the [help page](https://www.dynatrace.com/support/help/shortlink/section-api) to read about use-cases and examples.
*
* OpenAPI spec version: 1.0
*
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package de.tsystems.mms.apm.performancesignature.dynatracesaas.rest.model;
import com.google.gson.annotations.SerializedName;
import static de.tsystems.mms.apm.performancesignature.ui.util.PerfSigUIUtils.toIndentedString;
/**
* TechnologyInfo
*/
/**
 * TechnologyInfo
 *
 * Generated model describing a detected technology (type, edition, version).
 * The {@code type(..)}/{@code edition(..)}/{@code version(..)} methods are
 * fluent setters returning {@code this} for chaining.
 */
public class TechnologyInfo {
    @SerializedName("type")
    private String type;
    @SerializedName("edition")
    private String edition;
    @SerializedName("version")
    private String version;

    /** Fluent setter for {@link #type}. */
    public TechnologyInfo type(String type) {
        this.type = type;
        return this;
    }

    /**
     * Get type
     *
     * @return type
     **/
    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    /** Fluent setter for {@link #edition}. */
    public TechnologyInfo edition(String edition) {
        this.edition = edition;
        return this;
    }

    /**
     * Get edition
     *
     * @return edition
     **/
    public String getEdition() {
        return edition;
    }

    public void setEdition(String edition) {
        this.edition = edition;
    }

    /** Fluent setter for {@link #version}. */
    public TechnologyInfo version(String version) {
        this.version = version;
        return this;
    }

    /**
     * Get version
     *
     * @return version
     **/
    public String getVersion() {
        return version;
    }

    public void setVersion(String version) {
        this.version = version;
    }

    @Override
    public String toString() {
        return "class TechnologyInfo {\n"
                + "    type: " + toIndentedString(type) + "\n"
                + "    edition: " + toIndentedString(edition) + "\n"
                + "    version: " + toIndentedString(version) + "\n"
                + "}";
    }
}
| apache-2.0 |
leiyaoshun/jeesite | src/main/java/com/thinkgem/jeesite/modules/act/service/creator/RuntimeActivityDefinitionEntityIntepreter.java | 1784 | package com.thinkgem.jeesite.modules.act.service.creator;
import java.util.List;
/**
* RuntimeActivityDefinitionEntity的解释类(代理类)
* 主要用以解释properties字段的值,如为get("name")提供getName()方法
*
* @author bluejoe2008@gmail.com
*
*/
/**
 * RuntimeActivityDefinitionEntity的解释类(代理类)
 * 主要用以解释properties字段的值,如为get("name")提供getName()方法
 *
 * Interpreter (proxy) around RuntimeActivityDefinitionEntity providing typed
 * accessors over its generic properties map, e.g. getName() for get("name").
 * Note: the class name keeps the original "Intepreter" misspelling; renaming
 * it would break existing callers.
 *
 * @author bluejoe2008@gmail.com
 */
public class RuntimeActivityDefinitionEntityIntepreter
{
    RuntimeActivityDefinitionEntity _entity;

    public RuntimeActivityDefinitionEntityIntepreter(RuntimeActivityDefinitionEntity entity)
    {
        super();
        _entity = entity;
    }

    /** Assignees for the cloned activity instances. */
    public List<String> getAssignees()
    {
        return _entity.getProperty("assignees");
    }

    /** Id of the single activity being cloned. */
    public String getCloneActivityId()
    {
        return _entity.getProperty("cloneActivityId");
    }

    /** Ids of multiple activities being cloned. */
    public List<String> getCloneActivityIds()
    {
        return _entity.getProperty("cloneActivityIds");
    }

    /** Id of the activity to transition to next. */
    public String getNextActivityId()
    {
        return _entity.getProperty("nextActivityId");
    }

    /** Id of the prototype (template) activity. */
    public String getPrototypeActivityId()
    {
        return _entity.getProperty("prototypeActivityId");
    }

    // Unboxes the stored Boolean; throws NullPointerException when the
    // "sequential" property was never set.
    public boolean getSequential()
    {
        return (Boolean) _entity.getProperty("sequential");
    }

    public void setAssignees(List<String> assignees)
    {
        _entity.setProperty("assignees", assignees);
    }

    public void setCloneActivityId(String cloneActivityId)
    {
        _entity.setProperty("cloneActivityId", cloneActivityId);
    }

    public void setCloneActivityIds(List<String> cloneActivityIds)
    {
        _entity.setProperty("cloneActivityIds", cloneActivityIds);
    }

    public void setNextActivityId(String nextActivityId)
    {
        _entity.setProperty("nextActivityId", nextActivityId);
    }

    public void setPrototypeActivityId(String prototypeActivityId)
    {
        _entity.setProperty("prototypeActivityId", prototypeActivityId);
    }

    public void setSequential(boolean sequential)
    {
        _entity.setProperty("sequential", sequential);
    }
}
| apache-2.0 |
RAHULYERRAMSETTI1/SPACEACTIVITY | src/andy/nasa/main/widget/ContactItemInterface.java | 412 | package andy.nasa.main.widget;
/**
 * An item that can be grouped under an alphabetical index in a sectioned
 * contact list.
 */
public interface ContactItemInterface {
    /**
     * Returns the string used to categorize this item under an index letter.
     * It can be a first name, last name or display name: e.g. "Albert Tan",
     * "Amy Green" and "Alex Ferguson" all fall under index 'A'; "Ben Alpha"
     * and "Ben Beta" fall under index 'B'.
     */
    public String getItemForIndex();

    /** Returns the URL associated with this item. */
    public String getURL();
}
| apache-2.0 |
Mercateo/rest-schemagen | src/main/java/com/mercateo/common/rest/schemagen/plugin/common/TargetSchemaEnablerForLinkFactory.java | 538 | package com.mercateo.common.rest.schemagen.plugin.common;
import com.mercateo.common.rest.schemagen.plugin.TargetSchemaEnablerForLink;
import org.glassfish.hk2.api.Factory;
/**
 * HK2 factory providing a {@link TargetSchemaEnablerForLink} whose predicate
 * accepts every scope, i.e. target schemas are generated for all links.
 */
public class TargetSchemaEnablerForLinkFactory implements Factory<TargetSchemaEnablerForLink> {
    @Override
    public TargetSchemaEnablerForLink provide() {
        // Always-true predicate: enable target schema for every scope.
        return TargetSchemaEnablerForLink.fromPredicate(scope -> true);
    }

    @Override
    public void dispose(TargetSchemaEnablerForLink targetSchemaEnablerForLink) {
        // nothing to release; the provided instance holds no resources
    }
}
| apache-2.0 |
satabin/sablecc | src/org/sablecc/sablecc/codegeneration/scala/MDefaultPackage.java | 784 | /* This file was generated by SableCC's ObjectMacro. */
package org.sablecc.sablecc.codegeneration.scala;
/**
 * ObjectMacro-generated macro emitting the default package declaration
 * ("package language_<name>") for a generated language.
 */
public class MDefaultPackage {

    private final String pLanguageName;
    private final MDefaultPackage mDefaultPackage = this;

    public MDefaultPackage(String pLanguageName) {
        if(pLanguageName == null) throw new NullPointerException();
        this.pLanguageName = pLanguageName;
    }

    String pLanguageName() {
        return this.pLanguageName;
    }

    // Indirection through the macro's self-reference (generated pattern).
    private String rLanguageName() {
        return this.mDefaultPackage.pLanguageName();
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("package language_");
        sb.append(rLanguageName());
        sb.append(System.getProperty("line.separator"));
        return sb.toString();
    }
}
| apache-2.0 |
rgooch/Dominator | fleetmanager/hypervisors/update.go | 23520 | package hypervisors
import (
"flag"
"fmt"
"io"
"net"
"strings"
"time"
"github.com/Cloud-Foundations/Dominator/fleetmanager/topology"
"github.com/Cloud-Foundations/Dominator/lib/constants"
"github.com/Cloud-Foundations/Dominator/lib/errors"
"github.com/Cloud-Foundations/Dominator/lib/log/prefixlogger"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/lib/tags"
fm_proto "github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
// addressPoolOptionsType holds the tunable sizes for a Hypervisor's pool of
// free addresses.
type addressPoolOptionsType struct {
	desiredSize uint // target number of free addresses
	maximumSize uint // hard upper bound
	minimumSize uint // hard lower bound
}

var (
	// defaultAddressPoolOptions is populated from command-line flags in init.
	defaultAddressPoolOptions addressPoolOptionsType
	// errorNoAccessToResource is returned by checkAuth when a caller has
	// neither method access nor owner-user/owner-group membership.
	errorNoAccessToResource = errors.New("no access to resource")
	manageHypervisors       = flag.Bool("manageHypervisors", false,
		"If true, manage hypervisors")
)
// init registers the address-pool sizing flags that feed
// defaultAddressPoolOptions; their mutual consistency is validated later by
// checkPoolLimits.
func init() {
	flag.UintVar(&defaultAddressPoolOptions.desiredSize,
		"desiredAddressPoolSize", 16,
		"Desired number of free addresses to maintain in Hypervisor")
	flag.UintVar(&defaultAddressPoolOptions.maximumSize,
		"maximumAddressPoolSize", 24,
		"Maximum number of free addresses to maintain in Hypervisor")
	flag.UintVar(&defaultAddressPoolOptions.minimumSize,
		"minimumAddressPoolSize", 8,
		"Minimum number of free addresses to maintain in Hypervisor")
}
// checkPoolLimits validates that the configured desired address-pool size
// lies within the configured [minimum, maximum] bounds.
func checkPoolLimits() error {
	opts := &defaultAddressPoolOptions
	switch {
	case opts.desiredSize < opts.minimumSize:
		return fmt.Errorf(
			"desiredAddressPoolSize: %d is less than minimumAddressPoolSize: %d",
			opts.desiredSize, opts.minimumSize)
	case opts.desiredSize > opts.maximumSize:
		return fmt.Errorf(
			"desiredAddressPoolSize: %d is greater than maximumAddressPoolSize: %d",
			opts.desiredSize, opts.maximumSize)
	default:
		return nil
	}
}
// stringSliceToSet builds a membership set (value -> empty struct) from the
// given string values; duplicates collapse to a single entry.
func stringSliceToSet(values []string) map[string]struct{} {
	result := make(map[string]struct{}, len(values))
	for index := range values {
		result[values[index]] = struct{}{}
	}
	return result
}
// testInLocation reports whether location equals enclosingLocation or lies
// beneath it in the slash-separated location hierarchy. An empty
// enclosingLocation encloses everything.
func testInLocation(location, enclosingLocation string) bool {
	if enclosingLocation == "" || location == enclosingLocation {
		return true
	}
	return strings.HasPrefix(location, enclosingLocation+"/")
}
// address returns the host:port endpoint of the Hypervisor, preferring
// the machine's registered IP address over its hostname when one is set.
func (h *hypervisorType) address() string {
	var host string
	if len(h.machine.HostIpAddress) > 0 {
		host = h.machine.HostIpAddress.String()
	} else {
		host = h.machine.Hostname
	}
	return fmt.Sprintf("%s:%d", host, constants.HypervisorPortNumber)
}
// changeOwners pushes the machine's owner users and groups to the
// Hypervisor via the Hypervisor.ChangeOwners RPC. It is a no-op when this
// Fleet Manager is not managing hypervisors. If client is nil, a
// temporary connection is dialed and closed on return; otherwise the
// provided connection is used and left open.
func (h *hypervisorType) changeOwners(client *srpc.Client) error {
	if !*manageHypervisors {
		return nil
	}
	if client == nil {
		var err error
		client, err = srpc.DialHTTP("tcp", h.address(), time.Second*15)
		if err != nil {
			return err
		}
		defer client.Close()
	}
	request := hyper_proto.ChangeOwnersRequest{
		OwnerGroups: h.machine.OwnerGroups,
		OwnerUsers:  h.machine.OwnerUsers,
	}
	var reply hyper_proto.ChangeOwnersResponse
	err := client.RequestReply("Hypervisor.ChangeOwners", request, &reply)
	if err != nil {
		return err
	}
	// NOTE(review): this presumably uses Dominator's lib/errors package,
	// whose New returns nil for an empty string; with the standard-library
	// errors package this would return a non-nil error on success. The
	// import block is not visible in this chunk — confirm.
	return errors.New(reply.Error)
}
// checkAuth reports whether the authenticated caller may operate on this
// hypervisor. Access is granted for method-level access, for owner users
// and for members of any owner group; otherwise errorNoAccessToResource
// is returned.
func (h *hypervisorType) checkAuth(authInfo *srpc.AuthInformation) error {
	if authInfo.HaveMethodAccess {
		return nil
	}
	if _, isOwner := h.ownerUsers[authInfo.Username]; isOwner {
		return nil
	}
	for _, group := range h.machine.OwnerGroups {
		if _, inGroup := authInfo.GroupList[group]; inGroup {
			return nil
		}
	}
	return errorNoAccessToResource
}
// getMachineLocked returns the machine information with any local tags
// merged in. The caller must hold the lock. When there are no local tags
// the shared machine pointer is returned directly; otherwise a copy is
// made so merging does not mutate shared state.
func (h *hypervisorType) getMachineLocked() *fm_proto.Machine {
	if len(h.localTags) < 1 {
		return h.machine
	}
	machine := *h.machine
	machine.Tags = h.machine.Tags.Copy()
	machine.Tags.Merge(h.localTags)
	return &machine
}
// changeMachineTags replaces the locally-stored tags for the named
// hypervisor after checking that the caller is authorised. Tags that
// duplicate values already present in the machine's topology tags are
// dropped. The resulting set is persisted via the storer and an update is
// broadcast to subscribers for the hypervisor's location. Returns an
// error on a read-only Fleet Manager, for an unknown hypervisor, for an
// unauthorised caller, or if persisting the tags fails.
func (m *Manager) changeMachineTags(hostname string,
	authInfo *srpc.AuthInformation, tgs tags.Tags) error {
	if !*manageHypervisors {
		return errors.New("this is a read-only Fleet Manager")
	}
	// getLockedHypervisor returns with h.mutex held; every path below
	// must unlock exactly once.
	if h, err := m.getLockedHypervisor(hostname, true); err != nil {
		return err
	} else if err := h.checkAuth(authInfo); err != nil {
		h.mutex.Unlock()
		return err
	} else {
		for key, localVal := range tgs { // Delete duplicates.
			if machineVal := h.machine.Tags[key]; localVal == machineVal {
				delete(tgs, key)
			}
		}
		err := m.storer.WriteMachineTags(h.machine.HostIpAddress, tgs)
		if err != nil {
			h.mutex.Unlock()
			return err
		}
		if len(tgs) > 0 {
			h.localTags = tgs
		} else {
			h.localTags = nil
		}
		update := &fm_proto.Update{
			ChangedMachines: []*fm_proto.Machine{h.getMachineLocked()},
		}
		// Capture the location and release the lock before notifying,
		// since sendUpdate may block on subscriber channels.
		location := h.location
		h.mutex.Unlock()
		m.sendUpdate(location, update)
		return nil
	}
}
// getMachine returns the machine information with local tags merged in,
// holding a read lock for the duration.
func (h *hypervisorType) getMachine() *fm_proto.Machine {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.getMachineLocked()
}
// closeUpdateChannel deregisters an update channel previously created by
// makeUpdateChannel, removing it from its location's notifier set and
// from the global notifier index.
// NOTE(review): assumes channel was registered via makeUpdateChannel; for
// an unknown channel m.notifiers[channel] is nil and the first delete
// would panic — confirm callers uphold this.
func (m *Manager) closeUpdateChannel(channel <-chan fm_proto.Update) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	delete(m.notifiers[channel].notifiers, channel)
	delete(m.notifiers, channel)
}
// makeUpdateChannel registers and returns a buffered channel on which
// updates for the given location (and its sub-locations) will be sent.
// The first message carries the current state: all machines in the
// location and their VMs. On a read-only Fleet Manager a single Update
// with the Error field set is sent instead.
func (m *Manager) makeUpdateChannel(locationStr string) <-chan fm_proto.Update {
	channel := make(chan fm_proto.Update, 16)
	m.mutex.Lock()
	defer m.mutex.Unlock()
	// Lazily initialise the subscription maps.
	if m.locations == nil {
		m.locations = make(map[string]*locationType)
	}
	if m.notifiers == nil {
		m.notifiers = make(map[<-chan fm_proto.Update]*locationType)
	}
	location, ok := m.locations[locationStr]
	if !ok {
		location = &locationType{
			notifiers: make(map[<-chan fm_proto.Update]chan<- fm_proto.Update),
		}
		m.locations[locationStr] = location
	}
	location.notifiers[channel] = channel
	m.notifiers[channel] = location
	if !*manageHypervisors {
		channel <- fm_proto.Update{Error: "this is a read-only Fleet Manager"}
		return channel
	}
	// Build and send the initial snapshot of machines and VMs in this
	// location.
	machines := make([]*fm_proto.Machine, 0)
	vms := make(map[string]*hyper_proto.VmInfo, len(m.vms))
	for _, h := range m.hypervisors {
		if !testInLocation(h.location, locationStr) {
			continue
		}
		machines = append(machines, h.getMachine())
		for addr, vm := range h.vms {
			vms[addr] = &vm.VmInfo
		}
	}
	channel <- fm_proto.Update{
		ChangedMachines: machines,
		ChangedVMs:      vms,
	}
	return channel
}
// updateHypervisor refreshes an existing hypervisor entry from new
// topology data: location, machine information and owner users. Local
// tags now duplicated by topology tags are deleted and the trimmed set is
// written back to the storer. If this Fleet Manager manages hypervisors
// and the hypervisor is connected, owner changes and subnet updates are
// pushed asynchronously.
func (m *Manager) updateHypervisor(h *hypervisorType,
	machine *fm_proto.Machine) {
	location, _ := m.topology.GetLocationOfMachine(machine.Hostname)
	var numTagsToDelete uint
	h.mutex.Lock()
	h.location = location
	h.machine = machine
	h.ownerUsers = stringSliceToSet(machine.OwnerUsers)
	subnets := h.subnets
	// Drop local tags that the topology now provides with the same value.
	// Deleting during range iteration is safe in Go.
	for key, localVal := range h.localTags {
		if machineVal, ok := h.machine.Tags[key]; ok && localVal == machineVal {
			delete(h.localTags, key)
			numTagsToDelete++
		}
	}
	if numTagsToDelete > 0 {
		err := m.storer.WriteMachineTags(h.machine.HostIpAddress, h.localTags)
		if err != nil {
			h.logger.Printf("error writing tags: %s\n", err)
		} else {
			h.logger.Debugf(0, "Deleted %d obsolete local tags\n",
				numTagsToDelete)
		}
	}
	h.mutex.Unlock()
	// NOTE(review): h.probeStatus is read here without holding h.mutex —
	// confirm whether this racy read is acceptable.
	if *manageHypervisors && h.probeStatus == probeStatusConnected {
		go h.changeOwners(nil)
		go m.processSubnetsUpdates(h, subnets)
	}
}
// updateTopology installs a new topology: it lists all machines, applies
// the changes under the lock, and then finishes deleting any hypervisors
// that are no longer present (unregistering them from the storer).
func (m *Manager) updateTopology(t *topology.Topology) {
	machines, err := t.ListMachines("")
	if err != nil {
		m.logger.Println(err)
		return
	}
	for _, hyper := range m.updateTopologyLocked(t, machines) {
		m.storer.UnregisterHypervisor(hyper.machine.HostIpAddress)
		hyper.delete()
	}
}
// updateTopologyLocked installs the new topology and reconciles the set
// of known hypervisors against the machines it lists: existing entries
// are refreshed, new ones are created (and their management loops
// started), and entries for machines no longer present are removed along
// with their VM records. Subnets are similarly reconciled. Updates for
// changed and deleted machines are broadcast per location. The returned
// list contains the removed hypervisors; the caller completes their
// deletion outside the lock.
func (m *Manager) updateTopologyLocked(t *topology.Topology,
	machines []*fm_proto.Machine) []*hypervisorType {
	hypervisorsToDelete := make(map[string]struct{}, len(machines))
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.topology = t
	// Start by assuming every known hypervisor is to be deleted, then
	// remove from this set those still present in the new topology.
	for hypervisorName := range m.hypervisors {
		hypervisorsToDelete[hypervisorName] = struct{}{}
	}
	var hypersToChange, hypersToDelete []*hypervisorType
	for _, machine := range machines {
		delete(hypervisorsToDelete, machine.Hostname)
		if hypervisor, ok := m.hypervisors[machine.Hostname]; ok {
			if !hypervisor.machine.Equal(machine) {
				hypersToChange = append(hypersToChange, hypervisor)
			}
			m.updateHypervisor(hypervisor, machine)
		} else {
			// New machine: create the hypervisor entry and start its
			// management loop.
			location, _ := m.topology.GetLocationOfMachine(machine.Hostname)
			hypervisor := &hypervisorType{
				logger:       prefixlogger.New(machine.Hostname+": ", m.logger),
				location:     location,
				machine:      machine,
				migratingVms: make(map[string]*vmInfoType),
				ownerUsers:   stringSliceToSet(machine.OwnerUsers),
				vms:          make(map[string]*vmInfoType),
			}
			m.hypervisors[machine.Hostname] = hypervisor
			hypersToChange = append(hypersToChange, hypervisor)
			go m.manageHypervisorLoop(hypervisor)
		}
	}
	// Remove hypervisors (and their VM records) for machines no longer in
	// the topology.
	deleteList := make([]*hypervisorType, 0, len(hypervisorsToDelete))
	for hypervisorName := range hypervisorsToDelete {
		hypervisor := m.hypervisors[hypervisorName]
		deleteList = append(deleteList, hypervisor)
		delete(m.hypervisors, hypervisorName)
		hypersToDelete = append(hypersToDelete, hypervisor)
		for vmIP := range hypervisor.migratingVms {
			delete(m.vms, vmIP)
		}
		for vmIP := range hypervisor.vms {
			delete(m.vms, vmIP)
		}
	}
	if len(hypersToChange) > 0 || len(hypersToDelete) > 0 {
		updates := m.splitChanges(hypersToChange, hypersToDelete)
		for location, updateForLocation := range updates {
			m.sendUpdate(location, updateForLocation)
		}
	}
	// Reconcile subnets: add/update every subnet in the new topology and
	// delete those no longer present (keyed by gateway IP).
	subnetsToDelete := make(map[string]struct{}, len(m.subnets))
	for gatewayIp := range m.subnets {
		subnetsToDelete[gatewayIp] = struct{}{}
	}
	t.Walk(func(directory *topology.Directory) error {
		for _, tSubnet := range directory.Subnets {
			gatewayIp := tSubnet.IpGateway.String()
			delete(subnetsToDelete, gatewayIp)
			m.subnets[gatewayIp] = m.makeSubnet(tSubnet)
		}
		return nil
	})
	for gatewayIp := range subnetsToDelete {
		delete(m.subnets, gatewayIp)
	}
	return deleteList
}
// delete marks the hypervisor for removal and closes its connection (if
// any) so that the management loop terminates.
func (h *hypervisorType) delete() {
	h.mutex.Lock()
	defer h.mutex.Unlock()
	h.deleteScheduled = true
	if h.conn != nil {
		h.conn.Close()
		h.conn = nil
	}
}
// isDeleteScheduled reports whether delete has been called for this
// hypervisor, holding a read lock for the check.
func (h *hypervisorType) isDeleteScheduled() bool {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.deleteScheduled
}
// manageHypervisorLoop loads the persisted state for a hypervisor (VM
// list, serial number and local tags) from the storer, then repeatedly
// connects to and monitors the Hypervisor until its deletion is
// scheduled. If any persisted state cannot be read the hypervisor is not
// managed at all.
func (m *Manager) manageHypervisorLoop(h *hypervisorType) {
	vmList, err := m.storer.ListVMs(h.machine.HostIpAddress)
	if err != nil {
		h.logger.Printf("error reading VMs, not managing hypervisor: %s", err)
		return
	}
	h.cachedSerialNumber, err = m.storer.ReadMachineSerialNumber(
		h.machine.HostIpAddress)
	if err != nil {
		h.logger.Printf(
			"error reading serial number, not managing hypervisor: %s", err)
		return
	}
	h.serialNumber = h.cachedSerialNumber
	h.localTags, err = m.storer.ReadMachineTags(h.machine.HostIpAddress)
	if err != nil {
		h.logger.Printf("error reading tags, not managing hypervisor: %s", err)
		return
	}
	// Load persisted VM records into the per-hypervisor and global maps.
	for _, vmIpAddr := range vmList {
		pVmInfo, err := m.storer.ReadVm(h.machine.HostIpAddress, vmIpAddr)
		if err != nil {
			h.logger.Printf("error reading VM: %s: %s", vmIpAddr, err)
			continue
		}
		vmInfo := &vmInfoType{vmIpAddr, *pVmInfo, h}
		h.vms[vmIpAddr] = vmInfo
		m.mutex.Lock()
		m.vms[vmIpAddr] = vmInfo
		m.mutex.Unlock()
	}
	// Keep managing (connect, monitor, back off, reconnect) until the
	// hypervisor is scheduled for deletion.
	for !h.isDeleteScheduled() {
		sleepTime := m.manageHypervisor(h)
		time.Sleep(sleepTime)
	}
}
// manageHypervisor performs a single management pass: it dials the
// Hypervisor, pushes ownership, subscribes to Hypervisor.GetUpdates and
// processes Update messages until the connection fails or closes. The
// return value is how long the caller should wait before the next
// attempt. The deferred function records the final probe status and tears
// down any live connection.
func (m *Manager) manageHypervisor(h *hypervisorType) time.Duration {
	failureProbeStatus := probeStatusUnreachable
	defer func() {
		h.mutex.Lock()
		defer h.mutex.Unlock()
		h.probeStatus = failureProbeStatus
		if h.conn != nil {
			h.conn.Close()
			h.conn = nil
		}
	}()
	client, err := srpc.DialHTTP("tcp", h.address(), time.Second*15)
	if err != nil {
		h.logger.Debugln(1, err)
		// Map well-known dial failures to specific probe statuses.
		switch err {
		case srpc.ErrorAccessToMethodDenied:
			failureProbeStatus = probeStatusAccessDenied
		case srpc.ErrorNoSrpcEndpoint:
			failureProbeStatus = probeStatusNoSrpc
		case srpc.ErrorConnectionRefused:
			failureProbeStatus = probeStatusConnectionRefused
		default:
			failureProbeStatus = m.probeUnreachable(h)
		}
		return time.Second
	}
	defer client.Close()
	if err := h.changeOwners(client); err != nil {
		// An "unknown service" error means an older Hypervisor without
		// the method; only debug-log it.
		if strings.HasPrefix(err.Error(), "unknown service") {
			h.logger.Debugln(1, err)
		} else {
			h.logger.Println(err)
		}
	}
	conn, err := client.Call("Hypervisor.GetUpdates")
	if err != nil {
		if strings.HasPrefix(err.Error(), "unknown service") {
			h.logger.Debugln(1, err)
			failureProbeStatus = probeStatusNoService
			return time.Minute
		} else {
			h.logger.Println(err)
		}
		return time.Second
	}
	h.mutex.Lock()
	h.probeStatus = probeStatusConnected
	if h.deleteScheduled {
		h.mutex.Unlock()
		conn.Close()
		return 0
	}
	h.conn = conn
	h.receiveChannel = make(chan struct{}, 1)
	h.mutex.Unlock()
	go h.monitorLoop(client, conn)
	defer close(h.receiveChannel)
	h.logger.Debugln(0, "waiting for Update messages")
	firstUpdate := true
	for {
		var update hyper_proto.Update
		if err := conn.Decode(&update); err != nil {
			if err == io.EOF {
				h.logger.Debugln(0, "remote closed connection")
			} else {
				h.logger.Println(err)
			}
			return time.Second
		}
		// Signal the monitor loop that traffic was received.
		h.receiveChannel <- struct{}{}
		m.processHypervisorUpdate(h, update, firstUpdate)
		firstUpdate = false
	}
}
// getSubnetsForMachine returns the topology subnets applicable to the
// hypervisor's machine, keyed by subnet Id. The manager lock is held only
// while querying the topology.
func (m *Manager) getSubnetsForMachine(h *hypervisorType) (
	map[string]*topology.Subnet, error) {
	m.mutex.Lock()
	subnetList, err := m.topology.GetSubnetsForMachine(h.machine.Hostname)
	m.mutex.Unlock()
	if err != nil {
		return nil, err
	}
	subnets := make(map[string]*topology.Subnet, len(subnetList))
	for _, subnet := range subnetList {
		subnets[subnet.Id] = subnet
	}
	return subnets, nil
}
// processAddressPoolUpdates records the hypervisor's registered address
// pool and keeps the number of free addresses in each managed subnet
// between the configured minimum and maximum, adding fresh addresses or
// requesting removal of excess ones via Hypervisor.ChangeAddressPool.
// Hypervisors with "marginal" or "at risk" health are limited to a pool
// size of one.
func (m *Manager) processAddressPoolUpdates(h *hypervisorType,
	update hyper_proto.Update) {
	if update.HaveAddressPool {
		h.logger.Debugf(1, "registered address pool size: %d\n",
			len(update.AddressPool))
		addresses := make([]net.IP, 0, len(update.AddressPool))
		for _, address := range update.AddressPool {
			addresses = append(addresses, address.IpAddress)
		}
		err := m.storer.SetIPsForHypervisor(h.machine.HostIpAddress,
			addresses)
		if err != nil {
			h.logger.Println(err)
		}
	}
	ipsToAdd := make([]net.IP, 0)
	addressesToAdd := make([]hyper_proto.Address, 0)
	maxFreeAddresses := make(map[string]uint)
	tSubnets, err := m.getSubnetsForMachine(h)
	if err != nil {
		h.logger.Println(err)
		return
	}
	addressPoolOptions := defaultAddressPoolOptions
	if h.healthStatus == "marginal" || h.healthStatus == "at risk" {
		addressPoolOptions.desiredSize = 1
		addressPoolOptions.maximumSize = 1
		addressPoolOptions.minimumSize = 1
	}
	var numAddressesToRemove uint
	for subnetId, numFreeAddresses := range update.NumFreeAddresses {
		tSubnet := tSubnets[subnetId]
		if tSubnet == nil {
			h.logger.Printf("update for missing subnet: %s\n", subnetId)
			return
		}
		if !tSubnet.Manage {
			continue
		}
		if numFreeAddresses < addressPoolOptions.minimumSize {
			// Top the subnet up to the desired size.
			m.mutex.Lock()
			freeIPs, err := m.findFreeIPs(tSubnet,
				addressPoolOptions.desiredSize-numFreeAddresses)
			// Deferred deliberately inside the loop: the IPs stay marked
			// as allocating until this whole function returns.
			defer m.unmarkAllocatingIPs(freeIPs)
			m.mutex.Unlock()
			if err != nil {
				h.logger.Println(err)
				return
			}
			if len(freeIPs) < 1 {
				continue
			}
			for _, ip := range freeIPs {
				ipsToAdd = append(ipsToAdd, ip)
				// Build a MAC address from the IP bytes.
				// NOTE(review): indexing ip[0..3] assumes the 4-byte
				// net.IP form — confirm findFreeIPs returns To4() IPs.
				addressesToAdd = append(addressesToAdd, hyper_proto.Address{
					IpAddress: ip,
					MacAddress: fmt.Sprintf("52:54:%02x:%02x:%02x:%02x",
						ip[0], ip[1], ip[2], ip[3]),
				})
			}
			h.logger.Debugf(0, "Adding %d addresses to subnet: %s\n",
				len(freeIPs), subnetId)
		} else if numFreeAddresses > addressPoolOptions.maximumSize {
			// Trim the subnet back down to the desired size.
			maxFreeAddresses[subnetId] = addressPoolOptions.desiredSize
			numAddressesToRemove += numFreeAddresses -
				addressPoolOptions.desiredSize
			// NOTE(review): this log reports numFreeAddresses-maximumSize
			// but numAddressesToRemove accumulates
			// numFreeAddresses-desiredSize — confirm which count is
			// intended.
			h.logger.Debugf(0, "Removing %d excess addresses from subnet: %s\n",
				numFreeAddresses-addressPoolOptions.maximumSize, subnetId)
		}
	}
	if len(addressesToAdd) < 1 && len(maxFreeAddresses) < 1 {
		return
	}
	client, err := srpc.DialHTTP("tcp", h.address(), time.Minute)
	if err != nil {
		h.logger.Println(err)
		return
	}
	defer client.Close()
	request := hyper_proto.ChangeAddressPoolRequest{
		AddressesToAdd:       addressesToAdd,
		MaximumFreeAddresses: maxFreeAddresses,
	}
	var reply hyper_proto.ChangeAddressPoolResponse
	err = client.RequestReply("Hypervisor.ChangeAddressPool",
		request, &reply)
	if err == nil {
		err = errors.New(reply.Error)
	}
	if err != nil {
		h.logger.Println(err)
		return
	}
	m.storer.AddIPsForHypervisor(h.machine.HostIpAddress, ipsToAdd)
	if len(addressesToAdd) > 0 {
		h.logger.Debugf(0, "replenished pool with %d addresses\n",
			len(addressesToAdd))
	}
	if len(maxFreeAddresses) > 0 {
		h.logger.Debugf(0, "removed %d excess addresses from pool\n",
			numAddressesToRemove)
	}
}
// processHypervisorUpdate applies one Update message from a Hypervisor:
// it records health-status and serial-number changes, performs subnet and
// address-pool reconciliation when managing hypervisors, and processes VM
// changes (a full snapshot on the first update after connecting,
// incremental thereafter).
func (m *Manager) processHypervisorUpdate(h *hypervisorType,
	update hyper_proto.Update, firstUpdate bool) {
	h.mutex.Lock()
	oldHealthStatus := h.healthStatus
	h.healthStatus = update.HealthStatus
	oldSerialNumber := h.serialNumber
	if update.HaveSerialNumber && update.SerialNumber != "" {
		h.serialNumber = update.SerialNumber
	}
	h.mutex.Unlock()
	if !firstUpdate && update.HealthStatus != oldHealthStatus {
		h.logger.Printf("health status changed from: \"%s\" to: \"%s\"\n",
			oldHealthStatus, update.HealthStatus)
	}
	if *manageHypervisors {
		if update.HaveSubnets { // Must do subnets first.
			h.mutex.Lock()
			h.subnets = update.Subnets
			h.mutex.Unlock()
			m.processSubnetsUpdates(h, update.Subnets)
		}
		m.processAddressPoolUpdates(h, update)
	}
	// Persist a changed serial number; cache it only when the write
	// succeeds.
	if update.HaveSerialNumber && update.SerialNumber != "" &&
		update.SerialNumber != oldSerialNumber {
		err := m.storer.WriteMachineSerialNumber(h.machine.HostIpAddress,
			update.SerialNumber)
		if err != nil {
			h.logger.Println(err)
		} else {
			h.mutex.Lock()
			h.cachedSerialNumber = update.SerialNumber
			h.mutex.Unlock()
		}
	}
	if update.HaveVMs {
		if firstUpdate {
			m.processInitialVMs(h, update.VMs)
		} else {
			m.processVmUpdates(h, update.VMs)
		}
	}
}
// processInitialVMs handles the first VM snapshot after (re)connecting to
// a Hypervisor. Previously-known VMs (including migrating ones) absent
// from the snapshot are marked with nil entries so that
// processVmUpdatesWithLock deletes them.
func (m *Manager) processInitialVMs(h *hypervisorType,
	vms map[string]*hyper_proto.VmInfo) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	for ipAddr := range h.vms {
		if _, ok := vms[ipAddr]; !ok {
			vms[ipAddr] = nil
		}
	}
	for ipAddr := range h.migratingVms {
		if _, ok := vms[ipAddr]; !ok {
			vms[ipAddr] = nil
		}
	}
	m.processVmUpdatesWithLock(h, vms)
}
// processSubnetsUpdates reconciles the subnets configured on a Hypervisor
// (haveSubnets) with those the topology says the machine needs, issuing a
// single Hypervisor.UpdateSubnets RPC containing additions, changes and
// deletions. No RPC is made when nothing needs to change.
func (m *Manager) processSubnetsUpdates(h *hypervisorType,
	haveSubnets []hyper_proto.Subnet) {
	haveSubnetsMap := make(map[string]int, len(haveSubnets))
	for index, subnet := range haveSubnets {
		haveSubnetsMap[subnet.Id] = index
	}
	t, err := m.getTopology()
	if err != nil {
		h.logger.Println(err)
		return
	}
	needSubnets, err := t.GetSubnetsForMachine(h.machine.Hostname)
	if err != nil {
		h.logger.Println(err)
		return
	}
	// Start by assuming every existing subnet is to be deleted, then keep
	// those that are still needed.
	subnetsToDelete := make(map[string]struct{}, len(haveSubnets))
	for _, subnet := range haveSubnets {
		subnetsToDelete[subnet.Id] = struct{}{}
	}
	var request hyper_proto.UpdateSubnetsRequest
	for _, needSubnet := range needSubnets {
		if index, ok := haveSubnetsMap[needSubnet.Id]; ok {
			haveSubnet := haveSubnets[index]
			delete(subnetsToDelete, haveSubnet.Id)
			if !needSubnet.Equal(&haveSubnet) {
				request.Change = append(request.Change, needSubnet.Subnet)
			}
		} else {
			request.Add = append(request.Add, needSubnet.Subnet)
		}
	}
	for subnetId := range subnetsToDelete {
		request.Delete = append(request.Delete, subnetId)
	}
	if len(request.Add) < 1 && len(request.Change) < 1 &&
		len(request.Delete) < 1 {
		return
	}
	client, err := srpc.DialHTTP("tcp", h.address(), time.Minute)
	if err != nil {
		h.logger.Println(err)
		return
	}
	defer client.Close()
	var reply hyper_proto.UpdateSubnetsResponse
	err = client.RequestReply("Hypervisor.UpdateSubnets", request, &reply)
	if err == nil {
		err = errors.New(reply.Error)
	}
	if err != nil {
		h.logger.Println(err)
		return
	}
	h.logger.Debugf(0, "Added %d, changed %d and deleted %d subnets\n",
		len(request.Add), len(request.Change), len(request.Delete))
}
// processVmUpdates handles an incremental VM update from a Hypervisor.
// VMs reported with no volumes are converted to nil entries, which
// processVmUpdatesWithLock treats as deletions.
// NOTE(review): assumes updateVMs values are non-nil on entry; a nil
// entry would panic on vm.Volumes — confirm the Hypervisor never sends
// nil values in incremental updates.
func (m *Manager) processVmUpdates(h *hypervisorType,
	updateVMs map[string]*hyper_proto.VmInfo) {
	for ipAddr, vm := range updateVMs {
		if len(vm.Volumes) < 1 {
			updateVMs[ipAddr] = nil
		}
	}
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.processVmUpdatesWithLock(h, updateVMs)
}
// processVmUpdatesWithLock applies VM changes to the per-hypervisor and
// global VM maps, persists new/changed VM records via the storer, removes
// deleted ones, and broadcasts the resulting update to subscribers. A nil
// entry in updateVMs means the VM is gone. The caller must hold m.mutex.
func (m *Manager) processVmUpdatesWithLock(h *hypervisorType,
	updateVMs map[string]*hyper_proto.VmInfo) {
	update := fm_proto.Update{ChangedVMs: make(map[string]*hyper_proto.VmInfo)}
	vmsToDelete := make(map[string]struct{})
	for ipAddr, protoVm := range updateVMs {
		if protoVm == nil {
			// The VM disappeared: either schedule deletion of a regular
			// VM or forget a migrating one.
			if _, ok := h.migratingVms[ipAddr]; !ok {
				vmsToDelete[ipAddr] = struct{}{}
			} else {
				delete(h.migratingVms, ipAddr)
				delete(m.migratingIPs, ipAddr)
				h.logger.Debugf(0, "forgot migrating VM: %s\n", ipAddr)
			}
		} else {
			if protoVm.State == hyper_proto.StateMigrating {
				// A VM migrating in: track it separately and drop any
				// stale regular entry.
				if _, ok := h.vms[ipAddr]; ok {
					vmsToDelete[ipAddr] = struct{}{}
				}
				h.migratingVms[ipAddr] = &vmInfoType{ipAddr, *protoVm, h}
				m.migratingIPs[ipAddr] = struct{}{}
			} else if vm, ok := h.vms[ipAddr]; ok {
				// Known VM: persist only when its information changed.
				if !vm.VmInfo.Equal(protoVm) {
					err := m.storer.WriteVm(h.machine.HostIpAddress, ipAddr,
						*protoVm)
					if err != nil {
						h.logger.Printf("error writing VM: %s: %s\n",
							ipAddr, err)
					} else {
						h.logger.Debugf(0, "updated VM: %s\n", ipAddr)
					}
				}
				vm.VmInfo = *protoVm
				update.ChangedVMs[ipAddr] = protoVm
			} else {
				// New VM (possibly a just-completed migration).
				if _, ok := h.migratingVms[ipAddr]; ok {
					delete(h.migratingVms, ipAddr)
					delete(m.migratingIPs, ipAddr)
				}
				vm := &vmInfoType{ipAddr, *protoVm, h}
				h.vms[ipAddr] = vm
				m.vms[ipAddr] = vm
				err := m.storer.WriteVm(h.machine.HostIpAddress, ipAddr,
					*protoVm)
				if err != nil {
					h.logger.Printf("error writing VM: %s: %s\n", ipAddr, err)
				} else {
					h.logger.Debugf(0, "wrote VM: %s\n", ipAddr)
				}
				update.ChangedVMs[ipAddr] = protoVm
			}
		}
	}
	// Complete the deletions: drop map entries, remove persisted records
	// and record them in the broadcast update.
	for ipAddr := range vmsToDelete {
		delete(h.vms, ipAddr)
		delete(m.vms, ipAddr)
		err := m.storer.DeleteVm(h.machine.HostIpAddress, ipAddr)
		if err != nil {
			h.logger.Printf("error deleting VM: %s: %s\n", ipAddr, err)
		} else {
			h.logger.Debugf(0, "deleted VM: %s\n", ipAddr)
		}
		update.DeletedVMs = append(update.DeletedVMs, ipAddr)
	}
	m.sendUpdate(h.location, &update)
}
// splitChanges groups changed and deleted hypervisors into per-location
// Update messages, suitable for broadcasting with sendUpdate.
func (m *Manager) splitChanges(hypersToChange []*hypervisorType,
	hypersToDelete []*hypervisorType) map[string]*fm_proto.Update {
	updates := make(map[string]*fm_proto.Update)
	// perLocation returns the Update for a location, creating it lazily.
	perLocation := func(location string) *fm_proto.Update {
		update, ok := updates[location]
		if !ok {
			update = &fm_proto.Update{}
			updates[location] = update
		}
		return update
	}
	for _, h := range hypersToChange {
		update := perLocation(h.location)
		update.ChangedMachines = append(update.ChangedMachines,
			h.getMachine())
	}
	for _, h := range hypersToDelete {
		update := perLocation(h.location)
		update.DeletedMachines = append(update.DeletedMachines,
			h.machine.Hostname)
	}
	return updates
}
// sendUpdate broadcasts an update to every subscriber whose subscription
// location encloses hyperLocation. Empty updates are dropped.
// NOTE(review): a send can block once a subscriber's buffered channel
// fills, and some callers hold m.mutex — confirm receivers drain
// promptly.
func (m *Manager) sendUpdate(hyperLocation string, update *fm_proto.Update) {
	if len(update.ChangedMachines) < 1 && len(update.ChangedVMs) < 1 &&
		len(update.DeletedMachines) < 1 && len(update.DeletedVMs) < 1 {
		return
	}
	for locationStr, location := range m.locations {
		if !testInLocation(hyperLocation, locationStr) {
			continue
		}
		for _, channel := range location.notifiers {
			channel <- *update
		}
	}
}
| apache-2.0 |
rockmkd/datacollector | docs/generated/oxygen-webhelp/app/nav-links/json/concept_xmx_1wg_gz-d46e106842.js | 256 | define({"topics" : [{"title":"Event Records","href":"datacollector\/UserGuide\/Executors\/Spark.html#concept_qk2_3wg_gz","attributes": {"data-id":"concept_qk2_3wg_gz",},"menu": {"hasChildren":false,},"tocID":"concept_qk2_3wg_gz-d46e106935","topics":[]}]}); | apache-2.0 |
uw-madison-show/shots | deploy/lib/functions_utility.php | 4977 | <?php
/**
* Group of functions and shortcuts I like to use.
*
* Stolen, adapted, and simplified from many different PHP frameworks.
* flourishlib: http://flourishlib.com/
*
*/
/**
 * Convert all special characters to HTML entities, assuming UTF-8 input.
 *
 * Scalar values are passed through htmlentities() with ENT_QUOTES so that
 * both single and double quotes are converted. Arrays are encoded
 * recursively, preserving their keys.
 *
 * Adapted from: https://github.com/flourishlib/flourish-classes/blob/master/fHTML.php
 *
 * @param string|array $content The content to encode
 * @return string The encoded content
 *
 */
function encode($content)
{
	if (is_array($content)) {
		$encoded = array();
		foreach ($content as $key => $value) {
			$encoded[$key] = encode($value);
		}
		return $encoded;
	}
	return htmlentities($content, ENT_QUOTES, 'UTF-8');
}
/**
 * Convert a database_field_name into a human readable Database Field Name.
 *
 * Separator characters (slash, underscore, dash, dot, backslash, plus,
 * at-sign, backtick, tilde and pipe) are replaced with spaces, then each
 * word is capitalised.
 *
 * @param string $input_string The string to convert (usually a database field name).
 * @return string The converted string.
 *
 */
function convertFieldName( $input_string = FALSE )
{
	$spaced = preg_replace('/[\/_\-\.\\+@`~\|]/', ' ', $input_string);
	return ucwords($spaced);
}
/**
 * Gets a value from _GET or _POST and converts it to string.
 *
 * Looks in _GET first then in _POST. Uses filter_input() and applies the FILTER_SANITIZE_STRING filter.
 *
 * NOTE(review): FILTER_SANITIZE_STRING is deprecated as of PHP 8.1 —
 * confirm the target PHP version, or plan a replacement (e.g.
 * htmlspecialchars on the raw value).
 *
 * @param string $variable_name The name of the variable you want.
 * @return string Empty string when the name is missing/falsy or the
 *                variable is not set in either superglobal.
 */
function grabString( $variable_name = FALSE )
{
	$value = '';
	if ( !$variable_name ) { return $value; }
	// _GET wins; fall back to _POST only when _GET yields nothing.
	$value = filter_input(INPUT_GET, $variable_name, FILTER_SANITIZE_STRING);
	if ( empty($value) ) {
		$value = filter_input(INPUT_POST, $variable_name, FILTER_SANITIZE_STRING);
	}
	return $value;
}
/**
 * Makes a new DateTime object, sets the timezone to the app default, and reformats the return value as necessary based on the 2nd and 3rd paramters.
 *
 * Default timezone is set in the <app_root>/lib/shots/internals/settings_global.php file
 * (global $shots_default_timezone).
 *
 * @param object|string|integer $date The date to represent. If you feed in NULL or FALSE you get back FALSE.
 * @param string $return_format Defaults to 'string'. Can be one of 'string', 'DateTime', or 'timestamp'. If 'string' then I assume you want an ISO compliant date or datetime string. If 'timestamp', then you get a Unix timestamp as a string. The timestamp can be feed into date() to get a formatted date. If 'DateTime' you get back a DateTime object.
 * @param boolean $remove_time Defaults to FALSE. If set to true, all time info is removed before returning the result. If $remove_time is TRUE and $return_format is 'string' you will get, e.g., '2016-05-31'. If $remove_time is TRUE and $return_format is 'DateTime' you will get an object where the time portion of the date is set to '00:00:00.000000'.
 *
 * @return mixed Returns a string (ISO formatted or Unix Timestamp), DateTime object, or FALSE on failure.
 */
function handleDateString( $date = FALSE, $return_format = 'string', $remove_time = FALSE )
{
	global $shots_default_timezone;
	$return_value = FALSE;
	if ( is_integer($date) ) {
		// assume this is a unix timestamp and assume it is coming in
		// with the default timezone since unix timestamps do not
		try {
			$datetime_object = new DateTime();
			$datetime_object->setTimestamp($date);
			$datetime_object->setTimezone($shots_default_timezone);
		} catch (Exception $e) {
			// Unlike the other branches this one only logs; execution
			// falls through and the isset() guard below returns FALSE.
			trigger_error($e);
			// return FALSE;
		}
	} elseif ( is_string($date) ) {
		// if the date is coming in with a specified timezone doing the
		// create and setTimezone in two steps will move it to the SHOTs
		// timezone
		try {
			$datetime_object = new DateTime($date);
			$datetime_object->setTimezone($shots_default_timezone);
		} catch (Exception $e) {
			return FALSE;
		}
	} elseif ( is_object($date) ) {
		// try to coerce this object into a string and then do the same
		// datetime thing
		try {
			$date = $date->__toString();
			$datetime_object = new DateTime($date);
			$datetime_object->setTimezone($shots_default_timezone);
		} catch (Exception $e) {
			return FALSE;
		}
	} else {
		// if date came in as an array, boolean, null, etc.
		return FALSE;
	}
	if ( isset($datetime_object) ){
		if ( $remove_time ){
			$datetime_object->setTime(0, 0, 0);
		}
		if ( $return_format === 'timestamp' ){
			$return_value = (string) date_timestamp_get($datetime_object);
		} elseif ( $return_format == 'DateTime' ){
			$return_value = $datetime_object;
		} else {
			// this means that you want a string (or that you
			// gave me some unspecified format and you're going to get
			// a string and just have to deal)
			// TODO maybe i just want handleDateString() to return hours and minutes and discard the seconds?
			$string_format_to_return = 'Y-m-d H:i:s';
			if ( $remove_time ){
				$string_format_to_return = 'Y-m-d';
			}
			$return_value = date_format($datetime_object,
				$string_format_to_return
			);
		}
	}
	return $return_value;
}
?> | apache-2.0 |
albahrani/aquacontrol-server | src/main/java/com/github/albahrani/aquacontrol/server/json/JSONConfigurationChannel.java | 1179 | /**
* Copyright © 2017 albahrani (https://github.com/albahrani)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.albahrani.aquacontrol.server.json;
import java.util.List;
public class JSONConfigurationChannel {
private String name;
private String color;
private List<String> pins;
public List<String> getPins() {
return pins;
}
public String getName() {
return name;
}
public void setName(String channelName) {
this.name = channelName;
}
public void setPins(List<String> pins) {
this.pins = pins;
}
public String getColor() {
return this.color;
}
public void setColor(String color) {
this.color = color;
}
}
| apache-2.0 |
radiasoft/radtrack | radtrack/ui/globalgu.py | 8241 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'radtrack/ui/globalgu.ui'
#
# Created: Fri Feb 19 03:27:59 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4 for differences between PyQt4
# builds.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3 builds have no QString; strings are already text.
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer builds drop the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_globalgu(object):
def setupUi(self, globalgu):
globalgu.setObjectName(_fromUtf8("globalgu"))
globalgu.resize(772, 490)
self.centralwidget = QtGui.QWidget(globalgu)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout.addLayout(self.verticalLayout)
globalgu.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(globalgu)
self.menubar.setGeometry(QtCore.QRect(0, 0, 772, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuRecent_Projects = QtGui.QMenu(self.menuFile)
self.menuRecent_Projects.setEnabled(False)
self.menuRecent_Projects.setObjectName(_fromUtf8("menuRecent_Projects"))
self.menuRecent_Files = QtGui.QMenu(self.menuFile)
self.menuRecent_Files.setEnabled(False)
self.menuRecent_Files.setObjectName(_fromUtf8("menuRecent_Files"))
self.menuEdit = QtGui.QMenu(self.menubar)
self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
self.menuTabs = QtGui.QMenu(self.menubar)
self.menuTabs.setObjectName(_fromUtf8("menuTabs"))
self.menuNew_Tab = QtGui.QMenu(self.menuTabs)
self.menuNew_Tab.setObjectName(_fromUtf8("menuNew_Tab"))
self.menuExamples = QtGui.QMenu(self.menubar)
self.menuExamples.setObjectName(_fromUtf8("menuExamples"))
globalgu.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(globalgu)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
globalgu.setStatusBar(self.statusbar)
self.actionOpen = QtGui.QAction(globalgu)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.actionUndo = QtGui.QAction(globalgu)
self.actionUndo.setObjectName(_fromUtf8("actionUndo"))
self.actionRedo = QtGui.QAction(globalgu)
self.actionRedo.setObjectName(_fromUtf8("actionRedo"))
self.actionOpen_Project = QtGui.QAction(globalgu)
self.actionOpen_Project.setObjectName(_fromUtf8("actionOpen_Project"))
self.actionSet_Current_Project_Location = QtGui.QAction(globalgu)
self.actionSet_Current_Project_Location.setObjectName(_fromUtf8("actionSet_Current_Project_Location"))
self.actionOpen_New_RadTrack_Window = QtGui.QAction(globalgu)
self.actionOpen_New_RadTrack_Window.setObjectName(_fromUtf8("actionOpen_New_RadTrack_Window"))
self.actionImport_File = QtGui.QAction(globalgu)
self.actionImport_File.setObjectName(_fromUtf8("actionImport_File"))
self.actionExport_Current_Tab = QtGui.QAction(globalgu)
self.actionExport_Current_Tab.setObjectName(_fromUtf8("actionExport_Current_Tab"))
self.actionClose_Current_Tab = QtGui.QAction(globalgu)
self.actionClose_Current_Tab.setObjectName(_fromUtf8("actionClose_Current_Tab"))
self.actionReopen_Closed_Tab = QtGui.QAction(globalgu)
self.actionReopen_Closed_Tab.setObjectName(_fromUtf8("actionReopen_Closed_Tab"))
self.actionRename_Current_Tab = QtGui.QAction(globalgu)
self.actionRename_Current_Tab.setObjectName(_fromUtf8("actionRename_Current_Tab"))
self.actionCheckForUpdate = QtGui.QAction(globalgu)
self.actionCheckForUpdate.setObjectName(_fromUtf8("actionCheckForUpdate"))
self.actionExit = QtGui.QAction(globalgu)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionLCLS = QtGui.QAction(globalgu)
self.actionLCLS.setObjectName(_fromUtf8("actionLCLS"))
self.actionFODO = QtGui.QAction(globalgu)
self.actionFODO.setObjectName(_fromUtf8("actionFODO"))
self.menuFile.addAction(self.actionOpen_Project)
self.menuFile.addAction(self.actionSet_Current_Project_Location)
self.menuFile.addAction(self.actionOpen_New_RadTrack_Window)
self.menuFile.addAction(self.menuRecent_Projects.menuAction())
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionImport_File)
self.menuFile.addAction(self.actionExport_Current_Tab)
self.menuFile.addAction(self.menuRecent_Files.menuAction())
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionCheckForUpdate)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuEdit.addAction(self.actionUndo)
self.menuEdit.addAction(self.actionRedo)
self.menuTabs.addAction(self.menuNew_Tab.menuAction())
self.menuTabs.addSeparator()
self.menuTabs.addAction(self.actionClose_Current_Tab)
self.menuTabs.addAction(self.actionReopen_Closed_Tab)
self.menuTabs.addSeparator()
self.menuTabs.addAction(self.actionRename_Current_Tab)
self.menuExamples.addAction(self.actionLCLS)
self.menuExamples.addAction(self.actionFODO)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuTabs.menuAction())
self.menubar.addAction(self.menuExamples.menuAction())
self.retranslateUi(globalgu)
QtCore.QMetaObject.connectSlotsByName(globalgu)
def retranslateUi(self, globalgu):
    """Assign all user-visible strings for the main window.

    Auto-generated (pyuic style): called from setupUi, and again whenever
    the UI language changes. Every string is routed through _translate so
    Qt's i18n machinery can substitute a localized version.
    """
    globalgu.setWindowTitle(_translate("globalgu", "MainWindow", None))
    # Menu titles.
    self.menuFile.setTitle(_translate("globalgu", "File", None))
    self.menuRecent_Projects.setTitle(_translate("globalgu", "Recent Projects", None))
    self.menuRecent_Files.setTitle(_translate("globalgu", "Recent Files", None))
    self.menuEdit.setTitle(_translate("globalgu", "Edit", None))
    self.menuTabs.setTitle(_translate("globalgu", "Tabs", None))
    self.menuNew_Tab.setTitle(_translate("globalgu", "New Tab", None))
    self.menuExamples.setTitle(_translate("globalgu", "Examples", None))
    # Action (menu item) labels; "..." suffix marks actions that open a dialog.
    self.actionOpen.setText(_translate("globalgu", "Open", None))
    self.actionUndo.setText(_translate("globalgu", "Undo", None))
    self.actionRedo.setText(_translate("globalgu", "Redo", None))
    self.actionOpen_Project.setText(_translate("globalgu", "Open Project ...", None))
    self.actionSet_Current_Project_Location.setText(_translate("globalgu", "Set Current Project Location ...", None))
    self.actionOpen_New_RadTrack_Window.setText(_translate("globalgu", "Open New RadTrack Window ...", None))
    self.actionImport_File.setText(_translate("globalgu", "Import File ...", None))
    self.actionExport_Current_Tab.setText(_translate("globalgu", "Export Current Tab ...", None))
    self.actionClose_Current_Tab.setText(_translate("globalgu", "Close Current Tab", None))
    self.actionReopen_Closed_Tab.setText(_translate("globalgu", "Reopen Closed Tab", None))
    self.actionRename_Current_Tab.setText(_translate("globalgu", "Rename Current Tab ...", None))
    self.actionCheckForUpdate.setText(_translate("globalgu", "Check for updates ...", None))
    self.actionExit.setText(_translate("globalgu", "Exit", None))
    # Example lattice entries (presumably accelerator examples — confirm with menuExamples usage).
    self.actionLCLS.setText(_translate("globalgu", "LCLS", None))
    self.actionFODO.setText(_translate("globalgu", "FODO", None))
| apache-2.0 |
paulmey/azure-sdk-for-go | arm/examples/check.go | 2030 | package examples
import (
"fmt"
"log"
"net/http"
"os"
"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
"github.com/Azure/azure-sdk-for-go/arm/storage"
)
// withInspection returns a PrepareDecorator that prints each outgoing
// request's method and URL to stdout before handing the request on to
// the wrapped Preparer unchanged.
func withInspection() autorest.PrepareDecorator {
	return func(next autorest.Preparer) autorest.Preparer {
		inspect := func(req *http.Request) (*http.Request, error) {
			fmt.Printf("Inspecting Request: %s %s\n", req.Method, req.URL)
			return next.Prepare(req)
		}
		return autorest.PreparerFunc(inspect)
	}
}
// byInspecting returns a RespondDecorator that prints each response's
// status together with the originating request's method and URL, then
// delegates to the wrapped Responder unchanged.
func byInspecting() autorest.RespondDecorator {
	return func(next autorest.Responder) autorest.Responder {
		inspect := func(resp *http.Response) error {
			fmt.Printf("Inspecting Response: %s for %s %s\n", resp.Status, resp.Request.Method, resp.Request.URL)
			return next.Respond(resp)
		}
		return autorest.ResponderFunc(inspect)
	}
}
// checkName asks the Azure Storage resource provider whether the given
// storage-account name is available and prints the verdict (and, when
// unavailable, the service-supplied reason). Any failure along the way
// terminates the process via log.Fatalf.
func checkName(name string) {
	// Credentials (subscription ID, SP secrets, ...) come from the example helper.
	c, err := helpers.LoadCredentials()
	if err != nil {
		log.Fatalf("Error: %v", err)
	}

	ac := storage.NewAccountsClient(c["subscriptionID"])

	// Service-principal token scoped to the Azure Resource Manager endpoint.
	spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope)
	if err != nil {
		log.Fatalf("Error: %v", err)
	}
	ac.Authorizer = spt

	// Wire in stdout logging plus the request/response inspectors defined above,
	// so the example shows the full HTTP exchange.
	ac.Sender = autorest.CreateSender(
		autorest.WithLogging(log.New(os.Stdout, "sdk-example: ", log.LstdFlags)))
	ac.RequestInspector = withInspection()
	ac.ResponseInspector = byInspecting()

	cna, err := ac.CheckNameAvailability(
		storage.AccountCheckNameAvailabilityParameters{
			Name: to.StringPtr(name),
			Type: to.StringPtr("Microsoft.Storage/storageAccounts")})
	if err != nil {
		log.Fatalf("Error: %v", err)
	} else {
		// to.Bool/to.String safely dereference the SDK's pointer fields.
		if to.Bool(cna.NameAvailable) {
			fmt.Printf("The name '%s' is available\n", name)
		} else {
			fmt.Printf("The name '%s' is unavailable because %s\n", name, to.String(cna.Message))
		}
	}
}
| apache-2.0 |
imasahiro/armeria | core/src/main/java/com/linecorp/armeria/server/annotation/ConsumeTypes.java | 1119 | /*
* Copyright 2017 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.server.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * The containing annotation type for {@link ConsumeType}, allowing
 * {@link ConsumeType} to appear more than once on the same class or method.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE, ElementType.METHOD })
public @interface ConsumeTypes {

    /**
     * An array of {@link ConsumeType}s.
     */
    ConsumeType[] value();
}
| apache-2.0 |
mknapik/u2i-jenkins | recipes/default.rb | 339 | #
# Cookbook Name:: u2i-jenkins
# Recipe:: default
#
# Copyright (C) 2014 Michał Knapik
#
# All rights reserved - Do Not Redistribute
#
include_recipe 'u2i-jenkins::_rvm'
include_recipe 'u2i-jenkins::_is_secured'
include_recipe 'u2i-jenkins::jenkins'
include_recipe 'u2i-jenkins::services'
include_recipe 'u2i-jenkins::_service_restart'
| apache-2.0 |
andresmgot/serverless-kubeless | examples/multi-python/handler.py | 133 | def foo(event, context):
print event['data']
return 'foo'
def bar(event, context):
    """Kubeless handler: print the event payload and return the literal 'bar'.

    :param event: dict-like event; only event['data'] is read.
    :param context: function context (unused).
    :returns: the string 'bar'.
    """
    # Portable print form (the original used the Python-2-only statement) and
    # a properly spaced `return 'bar'` (original read `return'bar'`).
    print(event['data'])
    return 'bar'
| apache-2.0 |
calonso-conabio/intranet | protected/humhub/modules/post/messages/lt/views_edit.php | 82 | <?php
return array (
'Edit your post...' => 'Redaguokite savo skelbimą...',
);
| apache-2.0 |
GoogleChromeLabs/tooling.report | client/test/index.ts | 633 | /**
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Side-effect-only import (no bindings): executing the module is the point.
import './breadcrumbs';
| apache-2.0 |
NexusSW/lxd-common | lib/nexussw/lxd/transport/rest.rb | 211 | require "nexussw/lxd/transport"
require "nexussw/lxd/transport/mixins/rest"
module NexusSW
  module LXD
    class Transport
      # REST-backed transport. The class body is intentionally empty:
      # all behavior is supplied by the Mixins::Rest module mixed in here,
      # layered on top of the base Transport class.
      class Rest < Transport
        include Mixins::Rest
      end
    end
  end
end
xiaoyongaa/ALL | 网络编程第四周/先进先出队列的使用.py | 780 | import queue
#队列,先进先出队列
#put放数据,是否堵塞,阻塞的超时
#
# q=queue.Queue(2) #队列最大长度2
# print(q.empty()) #判断队列是否为空 空就是true
# q.put(11) #放进q队列里面11
# q.put(22)
# print(q.empty())
# print(q.maxsize) #队列的最大长度
# #q.put(33,block=False,timeout=2)
# #print(q.qsize())
# print(q.get()) #取出队列里面数据
# print(q.get())
# #print(q.get())
#join,task_done,阻塞竞猜,当队列中任务执行完毕后,不再阻塞
q=queue.Queue(2)
q.put(1)
q.put(2)
q.get()
q.task_done() #你把任务取出来了,告诉一下
q.get()
q.task_done() #你把任务取出来了,告诉一下
q.join() #如果队列里面的任务还没有完成,我就等待着
#join和task_done一起用
| apache-2.0 |
antlibs/ant-contrib | src/test/resources/design/src/mod/catchdepend/ClassDependsOnCatch.java | 959 | /*
* Copyright (c) 2001-2004 Ant-Contrib project. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Created on Dec 29, 2004
*/
package mod.catchdepend;
import mod.dummy.DummyRuntimeException;
/**
 * Test fixture: a class whose only reference to {@code mod.dummy} is the
 * {@link DummyRuntimeException} named in a catch clause — apparently used to
 * verify that dependency analysis detects types referenced solely via
 * catch blocks (confirm against the design-task test that loads it).
 *
 * @author dhiller
 */
public class ClassDependsOnCatch {

    /** Does no observable work; exists only to host the catch clause. */
    public void doNothing() {
        try {
            int x = 0;
            int y = x + 4; // unused on purpose: the body is filler for the try
        } catch (DummyRuntimeException e) {
            // Intentionally empty: the catch exists only to create the dependency.
        }
    }
}
| apache-2.0 |
afinka77/ignite | modules/core/src/test/java/org/apache/ignite/internal/processors/cache/expiry/IgniteCacheExpiryPolicyAbstractTest.java | 37489 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.expiry;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.cache.Cache;
import javax.cache.configuration.Factory;
import javax.cache.configuration.FactoryBuilder;
import javax.cache.expiry.CreatedExpiryPolicy;
import javax.cache.expiry.Duration;
import javax.cache.expiry.EternalExpiryPolicy;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.expiry.ModifiedExpiryPolicy;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.cache.CacheMemoryMode;
import org.apache.ignite.cache.CachePeekMode;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.configuration.NearCacheConfiguration;
import org.apache.ignite.internal.IgniteInterruptedCheckedException;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
import org.apache.ignite.internal.processors.cache.IgniteCacheAbstractTest;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException;
import org.apache.ignite.internal.util.lang.GridAbsPredicate;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.PAX;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.jetbrains.annotations.Nullable;
import static org.apache.ignite.cache.CacheAtomicWriteOrderMode.CLOCK;
import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
import static org.apache.ignite.cache.CacheMode.PARTITIONED;
import static org.apache.ignite.cache.CacheMode.REPLICATED;
import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
/**
*
*/
public abstract class IgniteCacheExpiryPolicyAbstractTest extends IgniteCacheAbstractTest {
/** */
private static final long TTL_FOR_EXPIRE = 500L;
/** */
private Factory<? extends ExpiryPolicy> factory;
/** */
private boolean nearCache;
/** */
private boolean disableEagerTtl;
/** */
private Integer lastKey = 0;
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
// No-op.
}
/** {@inheritDoc} */
@Override protected void afterTest() throws Exception {
stopAllGrids();
factory = null;
storeMap.clear();
}
/** {@inheritDoc} */
@Override protected CacheConfiguration cacheConfiguration(String gridName) throws Exception {
CacheConfiguration cfg = super.cacheConfiguration(gridName);
if (nearCache)
cfg.setNearConfiguration(new NearCacheConfiguration());
cfg.setExpiryPolicyFactory(factory);
cfg.setMemoryMode(memoryMode());
if (memoryMode() == CacheMemoryMode.OFFHEAP_TIERED)
cfg.setOffHeapMaxMemory(0);
if (disableEagerTtl)
cfg.setEagerTtl(false);
return cfg;
}
/**
* @return Cache memory mode.
*/
protected CacheMemoryMode memoryMode() {
return CacheMemoryMode.ONHEAP_TIERED;
}
/**
* @throws Exception if failed.
*/
public void testCreateUpdate0() throws Exception {
startGrids(1);
long ttl = 60L;
final String key = "key1";
final IgniteCache<String, String> cache = jcache();
for (int i = 0; i < 1000; i++) {
final IgniteCache<String, String> cache0 = cache.withExpiryPolicy(new ModifiedExpiryPolicy(new Duration(TimeUnit.HOURS, ttl)));
cache0.put(key, key);
info("PUT DONE");
}
int pSize = grid(0).context().cache().internalCache(null).context().ttl().pendingSize();
assertTrue("Too many pending entries: " + pSize, pSize <= 1);
cache.remove(key);
pSize = grid(0).context().cache().internalCache(null).context().ttl().pendingSize();
assertEquals(0, pSize);
}
/**
 * @throws Exception If failed.
 */
public void testZeroOnCreate() throws Exception {
factory = CreatedExpiryPolicy.factoryOf(Duration.ZERO);
startGrids();
for (final Integer key : keys()) {
log.info("Test zero duration on create, key: " + key);
zeroOnCreate(key);
}
}
/**
* @param key Key.
* @throws Exception If failed.
*/
private void zeroOnCreate(Integer key) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1); // Create with zero duration, should not create cache entry.
checkNoValue(F.asList(key));
}
/**
* @throws Exception If failed.
*/
public void testZeroOnUpdate() throws Exception {
factory = new FactoryBuilder.SingletonFactory<>(new TestPolicy(null, 0L, null));
startGrids();
for (final Integer key : keys()) {
log.info("Test zero duration on update, key: " + key);
zeroOnUpdate(key);
}
}
/**
* @param key Key.
* @throws Exception If failed.
*/
private void zeroOnUpdate(Integer key) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1); // Create.
assertEquals((Integer)1, cache.get(key));
cache.put(key, 2); // Update should expire entry.
checkNoValue(F.asList(key));
}
/**
* @throws Exception If failed.
*/
public void testZeroOnAccess() throws Exception {
factory = new FactoryBuilder.SingletonFactory<>(new TestPolicy(null, null, 0L));
startGrids();
for (final Integer key : keys()) {
log.info("Test zero duration on access, key: " + key);
zeroOnAccess(key);
}
final IgniteCache<Integer, Object> cache = jcache(0);
Integer key = primaryKey(cache);
IgniteCache<Integer, Object> cache0 = cache.withExpiryPolicy(new TestPolicy(60_000L, 60_000L, 60_000L));
cache0.put(key, 1);
cache.get(key); // Access using get.
GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override public boolean apply() {
return !cache.iterator().hasNext();
}
}, 1000);
assertFalse(cache.iterator().hasNext());
cache0.put(key, 1);
assertNotNull(cache.iterator().next()); // Access using iterator.
GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override public boolean apply() {
return !cache.iterator().hasNext();
}
}, 1000);
assertFalse(cache.iterator().hasNext());
}
/**
* @throws Exception If failed.
*/
public void testZeroOnAccessEagerTtlDisabled() throws Exception {
disableEagerTtl = true;
testZeroOnAccess();
}
/**
* @param key Key.
* @throws Exception If failed.
*/
private void zeroOnAccess(Integer key) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1); // Create.
assertEquals((Integer)1, cache.get(key)); // Access should expire entry.
waitExpired(F.asList(key));
assertFalse(cache.iterator().hasNext());
}
/**
* @throws Exception If failed.
*/
public void testEternal() throws Exception {
factory = EternalExpiryPolicy.factoryOf();
ExpiryPolicy plc = factory.create();
assertTrue(plc.getExpiryForCreation().isEternal());
assertNull(plc.getExpiryForUpdate());
assertNull(plc.getExpiryForAccess());
startGrids();
for (final Integer key : keys()) {
log.info("Test eternalPolicy, key: " + key);
eternal(key);
}
}
/**
* @throws Exception If failed.
*/
public void testNullFactory() throws Exception {
factory = null; // Should work as eternal.
startGrids();
for (final Integer key : keys()) {
log.info("Test eternalPolicy, key: " + key);
eternal(key);
}
}
/**
* @param key Key.
* @throws Exception If failed.
*/
private void eternal(Integer key) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1); // Create.
checkTtl(key, 0);
assertEquals((Integer) 1, cache.get(key)); // Get.
checkTtl(key, 0);
cache.put(key, 2); // Update.
checkTtl(key, 0);
assertTrue(cache.remove(key)); // Remove.
cache.withExpiryPolicy(new TestPolicy(60_000L, null, null)).put(key, 1); // Create with custom.
checkTtl(key, 60_000L);
cache.put(key, 2); // Update with eternal, should not change ttl.
checkTtl(key, 60_000L);
cache.withExpiryPolicy(new TestPolicy(null, TTL_FOR_EXPIRE, null)).put(key, 1); // Update with custom.
checkTtl(key, TTL_FOR_EXPIRE);
waitExpired(key);
}
/**
* @throws Exception If failed.
*/
public void testAccess() throws Exception {
factory = new FactoryBuilder.SingletonFactory<>(new TestPolicy(60_000L, 61_000L, 62_000L));
startGrids();
for (final Integer key : keys()) {
log.info("Test access [key=" + key + ']');
access(key);
}
accessGetAll();
for (final Integer key : keys()) {
log.info("Test filterAccessRemove access [key=" + key + ']');
filterAccessRemove(key);
}
for (final Integer key : keys()) {
log.info("Test filterAccessReplace access [key=" + key + ']');
filterAccessReplace(key);
}
if (atomicityMode() == TRANSACTIONAL) {
TransactionConcurrency[] txModes = {PESSIMISTIC};
for (TransactionConcurrency txMode : txModes) {
for (final Integer key : keys()) {
log.info("Test txGet [key=" + key + ", txMode=" + txMode + ']');
txGet(key, txMode);
}
}
for (TransactionConcurrency txMode : txModes) {
log.info("Test txGetAll [txMode=" + txMode + ']');
txGetAll(txMode);
}
}
IgniteCache<Integer, Integer> cache = jcache(0);
Collection<Integer> putKeys = keys();
info("Put keys: " + putKeys);
for (final Integer key : putKeys)
cache.put(key, key);
Iterator<Cache.Entry<Integer, Integer>> it = cache.iterator();
List<Integer> itKeys = new ArrayList<>();
while (it.hasNext())
itKeys.add(it.next().getKey());
info("It keys: " + itKeys);
assertTrue(itKeys.size() >= putKeys.size());
for (Integer key : itKeys) {
info("Checking iterator key: " + key);
checkTtl(key, 62_000L, true);
}
}
/**
* @param key Key.
* @param txMode Transaction concurrency mode.
* @throws Exception If failed.
*/
private void txGet(Integer key, TransactionConcurrency txMode) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1);
checkTtl(key, 60_000L);
try (Transaction tx = ignite(0).transactions().txStart(txMode, REPEATABLE_READ)) {
assertEquals((Integer)1, cache.get(key));
tx.commit();
}
checkTtl(key, 62_000L, true);
try (Transaction tx = ignite(0).transactions().txStart(txMode, REPEATABLE_READ)) {
assertEquals((Integer)1, cache.withExpiryPolicy(new TestPolicy(100L, 200L, 1000L)).get(key));
tx.commit();
}
checkTtl(key, 1000L, true);
}
/**
* @param txMode Transaction concurrency mode.
* @throws Exception If failed.
*/
private void txGetAll(TransactionConcurrency txMode) throws Exception {
IgniteCache<Integer, Integer> cache = jcache(0);
Map<Integer, Integer> vals = new HashMap<>();
for (int i = 0; i < 1000; i++)
vals.put(i, i);
cache.putAll(vals);
try (Transaction tx = ignite(0).transactions().txStart(txMode, REPEATABLE_READ)) {
assertEquals(vals, cache.getAll(vals.keySet()));
tx.commit();
}
for (Integer key : vals.keySet())
checkTtl(key, 62_000L);
try (Transaction tx = ignite(0).transactions().txStart(txMode, REPEATABLE_READ)) {
assertEquals(vals, cache.withExpiryPolicy(new TestPolicy(100L, 200L, 1000L)).getAll(vals.keySet()));
tx.commit();
}
for (Integer key : vals.keySet())
checkTtl(key, 1000L);
}
/**
* @param key Key.
* @throws Exception If failed.
*/
private void access(Integer key) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1);
checkTtl(key, 60_000L);
assertEquals((Integer) 1, cache.get(key));
checkTtl(key, 62_000L, true);
IgniteCache<Integer, Integer> cache0 = cache.withExpiryPolicy(new TestPolicy(1100L, 1200L, TTL_FOR_EXPIRE));
assertEquals((Integer)1, cache0.get(key));
checkTtl(key, TTL_FOR_EXPIRE, true);
waitExpired(key);
cache.put(key, 1);
checkTtl(key, 60_000L);
Integer res = cache.invoke(key, new GetEntryProcessor());
assertEquals((Integer)1, res);
checkTtl(key, 62_000L, true);
}
/**
* @param key Key.
* @throws Exception If failed.
*/
private void filterAccessRemove(Integer key) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1);
checkTtl(key, 60_000L);
assertFalse(cache.remove(key, 2)); // Remove fails, access expiry policy should be used.
checkTtl(key, 62_000L, true);
assertFalse(cache.withExpiryPolicy(new TestPolicy(100L, 200L, 1000L)).remove(key, 2));
checkTtl(key, 1000L, true);
}
/**
* @param key Key.
* @throws Exception If failed.
*/
private void filterAccessReplace(Integer key) throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
cache.put(key, 1);
checkTtl(key, 60_000L);
assertFalse(cache.replace(key, 2, 3)); // Put fails, access expiry policy should be used.
checkTtl(key, 62_000L, true);
assertFalse(cache.withExpiryPolicy(new TestPolicy(100L, 200L, 1000L)).remove(key, 2));
checkTtl(key, 1000L, true);
}
/**
* @throws Exception If failed.
*/
private void accessGetAll() throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
Map<Integer, Integer> vals = new HashMap<>();
for (int i = 0; i < 1000; i++)
vals.put(i, i);
cache.removeAll(vals.keySet());
cache.putAll(vals);
for (Integer key : vals.keySet())
checkTtl(key, 60_000L);
Map<Integer, Integer> vals0 = cache.getAll(vals.keySet());
assertEquals(vals, vals0);
for (Integer key : vals.keySet())
checkTtl(key, 62_000L, true);
vals0 = cache.withExpiryPolicy(new TestPolicy(1100L, 1200L, 1000L)).getAll(vals.keySet());
assertEquals(vals, vals0);
for (Integer key : vals.keySet())
checkTtl(key, 1000L, true);
waitExpired(vals.keySet());
}
/**
* @throws Exception If failed.
*/
public void testCreateUpdate() throws Exception {
factory = new FactoryBuilder.SingletonFactory<>(new TestPolicy(60_000L, 61_000L, null));
startGrids();
for (final Integer key : keys()) {
log.info("Test createUpdate [key=" + key + ']');
createUpdate(key, null);
}
for (final Integer key : keys()) {
log.info("Test createUpdateCustomPolicy [key=" + key + ']');
createUpdateCustomPolicy(key, null);
}
createUpdatePutAll(null);
if (atomicityMode() == TRANSACTIONAL) {
TransactionConcurrency[] txModes = new TransactionConcurrency[]{PESSIMISTIC, OPTIMISTIC};
for (TransactionConcurrency tx : txModes) {
for (final Integer key : keys()) {
log.info("Test createUpdate [key=" + key + ", tx=" + tx + ']');
createUpdate(key, tx);
}
for (final Integer key : keys()) {
log.info("Test createUpdateCustomPolicy [key=" + key + ", tx=" + tx + ']');
createUpdateCustomPolicy(key, tx);
}
createUpdatePutAll(tx);
}
}
}
/**
* @param txConcurrency Not null transaction concurrency mode if explicit transaction should be started.
* @throws Exception If failed.
*/
private void createUpdatePutAll(@Nullable TransactionConcurrency txConcurrency) throws Exception {
Map<Integer, Integer> vals = new HashMap<>();
for (int i = 0; i < 1000; i++)
vals.put(i, i);
IgniteCache<Integer, Integer> cache = jcache(0);
cache.removeAll(vals.keySet());
Transaction tx = startTx(txConcurrency);
// Create.
cache.putAll(vals);
if (tx != null)
tx.commit();
for (Integer key : vals.keySet())
checkTtl(key, 60_000L);
tx = startTx(txConcurrency);
// Update.
cache.putAll(vals);
if (tx != null)
tx.commit();
for (Integer key : vals.keySet())
checkTtl(key, 61_000L);
tx = startTx(txConcurrency);
// Update with provided TTL.
cache.withExpiryPolicy(new TestPolicy(null, 1000L, null)).putAll(vals);
if (tx != null)
tx.commit();
for (Integer key : vals.keySet())
checkTtl(key, 1000L);
waitExpired(vals.keySet());
tx = startTx(txConcurrency);
// Try create again.
cache.putAll(vals);
if (tx != null)
tx.commit();
for (Integer key : vals.keySet())
checkTtl(key, 60_000L);
Map<Integer, Integer> newVals = new HashMap<>(vals);
newVals.put(100_000, 1);
// Updates and create.
cache.putAll(newVals);
for (Integer key : vals.keySet())
checkTtl(key, 61_000L);
checkTtl(100_000, 60_000L);
cache.removeAll(newVals.keySet());
}
/**
* @param key Key.
* @param txConcurrency Not null transaction concurrency mode if explicit transaction should be started.
* @throws Exception If failed.
*/
private void createUpdateCustomPolicy(Integer key, @Nullable TransactionConcurrency txConcurrency)
throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
assertNull(cache.get(key));
Transaction tx = startTx(txConcurrency);
cache.withExpiryPolicy(new TestPolicy(10_000L, 20_000L, 30_000L)).put(key, 1);
if (tx != null)
tx.commit();
checkTtl(key, 10_000L);
for (int idx = 0; idx < gridCount(); idx++) {
assertEquals(1, jcache(idx).get(key)); // Try get.
checkTtl(key, 10_000L);
}
tx = startTx(txConcurrency);
// Update, returns null duration, should not change TTL.
cache.withExpiryPolicy(new TestPolicy(20_000L, null, null)).put(key, 2);
if (tx != null)
tx.commit();
checkTtl(key, 10_000L);
tx = startTx(txConcurrency);
// Update with provided TTL.
cache.withExpiryPolicy(new TestPolicy(null, TTL_FOR_EXPIRE, null)).put(key, 2);
if (tx != null)
tx.commit();
checkTtl(key, TTL_FOR_EXPIRE);
waitExpired(key);
tx = startTx(txConcurrency);
// Create, returns null duration, should create with 0 TTL.
cache.withExpiryPolicy(new TestPolicy(null, 20_000L, 30_000L)).put(key, 1);
if (tx != null)
tx.commit();
checkTtl(key, 0L);
}
/**
* @param key Key.
* @param txConcurrency Not null transaction concurrency mode if explicit transaction should be started.
* @throws Exception If failed.
*/
private void createUpdate(Integer key, @Nullable TransactionConcurrency txConcurrency)
throws Exception {
IgniteCache<Integer, Integer> cache = jcache();
// Run several times to make sure create after remove works as expected.
for (int i = 0; i < 3; i++) {
log.info("Iteration: " + i);
Transaction tx = startTx(txConcurrency);
cache.put(key, 1); // Create.
if (tx != null)
tx.commit();
checkTtl(key, 60_000L);
for (int idx = 0; idx < gridCount(); idx++) {
assertEquals(1, jcache(idx).get(key)); // Try get.
checkTtl(key, 60_000L);
}
tx = startTx(txConcurrency);
cache.put(key, 2); // Update.
if (tx != null)
tx.commit();
checkTtl(key, 61_000L);
for (int idx = 0; idx < gridCount(); idx++) {
assertEquals(2, jcache(idx).get(key)); // Try get.
checkTtl(key, 61_000L);
}
tx = startTx(txConcurrency);
assertTrue(cache.remove(key));
if (tx != null)
tx.commit();
for (int idx = 0; idx < gridCount(); idx++)
assertNull(jcache(idx).get(key));
}
}
/**
* @param txMode Transaction concurrency mode.
* @return Transaction.
*/
@Nullable private Transaction startTx(@Nullable TransactionConcurrency txMode) {
return txMode == null ? null : ignite(0).transactions().txStart(txMode, REPEATABLE_READ);
}
/**
* @throws Exception If failed.
*/
public void testNearCreateUpdate() throws Exception {
fail("https://issues.apache.org/jira/browse/IGNITE-518");
if (cacheMode() != PARTITIONED)
return;
nearCache = true;
testCreateUpdate();
nearReaderUpdate();
nearPutAll();
}
/**
* @throws Exception If failed.
*/
private void nearReaderUpdate() throws Exception {
log.info("Test near reader update.");
Integer key = nearKeys(jcache(0), 1, 500_000).get(0);
IgniteCache<Integer, Integer> cache0 = jcache(0);
assertNotNull(jcache(0).getConfiguration(CacheConfiguration.class).getNearConfiguration());
cache0.put(key, 1);
checkTtl(key, 60_000L);
IgniteCache<Integer, Integer> cache1 = jcache(1);
if (atomicityMode() == ATOMIC && atomicWriteOrderMode() == CLOCK)
Thread.sleep(100);
// Update from another node.
cache1.put(key, 2);
checkTtl(key, 61_000L);
if (atomicityMode() == ATOMIC && atomicWriteOrderMode() == CLOCK)
Thread.sleep(100);
// Update from another node with provided TTL.
cache1.withExpiryPolicy(new TestPolicy(null, TTL_FOR_EXPIRE, null)).put(key, 3);
checkTtl(key, TTL_FOR_EXPIRE);
waitExpired(key);
// Try create again.
cache0.put(key, 1);
checkTtl(key, 60_000L);
if (atomicityMode() == ATOMIC && atomicWriteOrderMode() == CLOCK)
Thread.sleep(100);
// Update from near node with provided TTL.
cache0.withExpiryPolicy(new TestPolicy(null, TTL_FOR_EXPIRE + 1, null)).put(key, 2);
checkTtl(key, TTL_FOR_EXPIRE + 1);
waitExpired(key);
}
/**
* @throws Exception If failed.
*/
private void nearPutAll() throws Exception {
Map<Integer, Integer> vals = new HashMap<>();
for (int i = 0; i < 1000; i++)
vals.put(i, i);
IgniteCache<Integer, Integer> cache0 = jcache(0);
cache0.removeAll(vals.keySet());
cache0.putAll(vals);
for (Integer key : vals.keySet())
checkTtl(key, 60_000L);
if (atomicityMode() == ATOMIC && atomicWriteOrderMode() == CLOCK)
Thread.sleep(100);
IgniteCache<Integer, Integer> cache1 = jcache(1);
// Update from another node.
cache1.putAll(vals);
for (Integer key : vals.keySet())
checkTtl(key, 61_000L);
if (atomicityMode() == ATOMIC && atomicWriteOrderMode() == CLOCK)
Thread.sleep(100);
// Update from another node with provided TTL.
cache1.withExpiryPolicy(new TestPolicy(null, 1000L, null)).putAll(vals);
for (Integer key : vals.keySet())
checkTtl(key, 1000L);
waitExpired(vals.keySet());
// Try create again.
cache0.putAll(vals);
if (atomicityMode() == ATOMIC && atomicWriteOrderMode() == CLOCK)
Thread.sleep(100);
// Update from near node with provided TTL.
cache1.withExpiryPolicy(new TestPolicy(null, 1101L, null)).putAll(vals);
for (Integer key : vals.keySet())
checkTtl(key, 1101L);
waitExpired(vals.keySet());
}
/**
* @throws Exception If failed.
*/
public void testNearAccess() throws Exception {
fail("https://issues.apache.org/jira/browse/IGNITE-518");
if (cacheMode() != PARTITIONED)
return;
nearCache = true;
testAccess();
Integer key = primaryKeys(jcache(0), 1, 500_000).get(0);
IgniteCache<Integer, Integer> cache0 = jcache(0);
cache0.put(key, 1);
checkTtl(key, 60_000L);
assertEquals(1, jcache(1).get(key));
checkTtl(key, 62_000L, true);
assertEquals(1, jcache(2).withExpiryPolicy(new TestPolicy(1100L, 1200L, TTL_FOR_EXPIRE)).get(key));
checkTtl(key, TTL_FOR_EXPIRE, true);
waitExpired(key);
// Test reader update on get.
key = nearKeys(jcache(0), 1, 600_000).get(0);
cache0.put(key, 1);
checkTtl(key, 60_000L);
IgniteCache<Object, Object> cache =
grid(0).affinity(null).isPrimary(grid(1).localNode(), key) ? jcache(1) : jcache(2);
assertEquals(1, cache.get(key));
checkTtl(key, 62_000L, true);
}
/**
 * Puts an entry on a server node and verifies that, after expiry, the entry
 * also disappears from a client node's near cache.
 *
 * @throws Exception If failed.
 */
public void testNearExpiresOnClient() throws Exception {
    // Near caches are only meaningful for partitioned caches.
    if (cacheMode() != PARTITIONED)
        return;

    // Entries created with a 1-second TTL.
    factory = CreatedExpiryPolicy.factoryOf(new Duration(TimeUnit.SECONDS,1));

    nearCache = true;

    startGrids();

    // Start an extra client node (not in forced server mode).
    IgniteConfiguration clientCfg = getConfiguration("client").setClientMode(true);

    ((TcpDiscoverySpi)clientCfg.getDiscoverySpi()).setForceServerMode(false);

    Ignite clientNode = startGrid("client", clientCfg);

    IgniteCache<Object, Object> clientCache = clientNode.cache(null);

    Integer k = 1;

    // Put on server node.
    jcache(0).put(k, 1);

    // Make entry cached in client NearCache.
    assertEquals(1, clientCache.get(k));

    assertEquals(1, clientCache.localPeek(k, CachePeekMode.NEAR));

    waitExpired(k);

    // Check client NearCache.
    assertNull(clientCache.localPeek(k, CachePeekMode.NEAR));
}
/**
 * Produces a fresh set of test keys relative to cache 0: one primary key,
 * plus a backup key and (for non-replicated caches) a near key when more
 * than one grid is running. Advances {@code lastKey} past the chosen keys.
 *
 * @return Test keys.
 * @throws Exception If failed.
 */
private Collection<Integer> keys() throws Exception {
    IgniteCache<Integer, Object> cache0 = jcache(0);

    List<Integer> result = new ArrayList<>();

    // Always include a key primary on node 0.
    result.add(primaryKeys(cache0, 1, lastKey).get(0));

    if (gridCount() > 1) {
        // With multiple nodes there is also a backup key...
        result.add(backupKeys(cache0, 1, lastKey).get(0));

        // ...and, unless the cache is replicated, a near (non-affine) key.
        if (cache0.getConfiguration(CacheConfiguration.class).getCacheMode() != REPLICATED)
            result.add(nearKeys(cache0, 1, lastKey).get(0));
    }

    // Ensure the next call starts searching above every key handed out here.
    lastKey = Collections.max(result) + 1;

    return result;
}
/**
 * Convenience overload: waits for a single key to expire on all grids.
 *
 * @param key Key.
 * @throws Exception If failed.
 */
private void waitExpired(Integer key) throws Exception {
    waitExpired(Collections.singleton(key));
}
/**
 * Waits (up to 3 seconds) until none of the given keys has an on-heap value
 * on any grid, then asserts via {@link #checkNoValue} that no value remains.
 *
 * @param keys Keys.
 * @throws Exception If failed.
 */
private void waitExpired(final Collection<Integer> keys) throws Exception {
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            for (int i = 0; i < gridCount(); i++) {
                for (Integer key : keys) {
                    Object val = jcache(i).localPeek(key, CachePeekMode.ONHEAP);

                    if (val != null)
                        return false;
                }
            }

            // BUGFIX: previously this returned 'false' unconditionally, so the
            // condition could never be satisfied and every call timed out after
            // the full 3000 ms. All keys are gone from every node: stop waiting.
            return true;
        }
    }, 3000);

    checkNoValue(keys);
}
/**
 * Asserts that none of the given keys has a value on any grid — first via
 * local peek (on-heap and off-heap), then via a full get with an empty store.
 *
 * @param keys Keys.
 * @throws Exception If failed.
 */
private void checkNoValue(Collection<Integer> keys) throws Exception {
    IgniteCache<Integer, Object> cache = jcache(0);

    // Phase 1: local peeks must find nothing on any node.
    for (int i = 0; i < gridCount(); i++) {
        ClusterNode node = grid(i).cluster().localNode();

        for (Integer key : keys) {
            Object v = jcache(i).localPeek(key, CachePeekMode.ONHEAP, CachePeekMode.OFFHEAP);

            // Log diagnostic details before failing the assertion below.
            if (v != null) {
                log.info("Unexpected value [grid=" + i +
                    ", primary=" + affinity(cache).isPrimary(node, key) +
                    ", backup=" + affinity(cache).isBackup(node, key) + ']');
            }

            assertNull("Unexpected non-null value for grid " + i, v);
        }
    }

    // Phase 2: with the store emptied, gets must also return nothing.
    storeMap.clear();

    for (int i = 0; i < gridCount(); i++) {
        for (Integer key : keys)
            assertNull("Unexpected non-null value for grid " + i, jcache(i).get(key));
    }
}
/**
 * Convenience overload: checks the TTL without waiting for a TTL update.
 *
 * @param key Key.
 * @param ttl TTL.
 * @throws Exception If failed.
 */
private void checkTtl(Object key, long ttl) throws Exception {
    checkTtl(key, ttl, false);
}
/**
 * Verifies that the entry for {@code key} carries the expected TTL on every
 * node that holds it. Deleted entries must have zero TTL and must not live on
 * primary/backup nodes; missing entries are only allowed on non-affine nodes.
 * At least one node must actually hold the entry.
 *
 * @param key Key.
 * @param ttl TTL.
 * @param wait If {@code true} waits for ttl update.
 * @throws Exception If failed.
 */
private void checkTtl(Object key, final long ttl, boolean wait) throws Exception {
    boolean found = false;

    for (int i = 0; i < gridCount(); i++) {
        IgniteKernal grid = (IgniteKernal)grid(i);

        GridCacheAdapter<Object, Object> cache = grid.context().cache().internalCache();

        // For near caches inspect the underlying DHT cache where TTLs live.
        if (cache.context().isNear())
            cache = cache.context().near().dht();

        // Retry loop: entry lookups can race with concurrent removals.
        while (true) {
            try {
                // ONHEAP_TIERED: peek only (no entry creation); otherwise force entry creation.
                GridCacheEntryEx e = memoryMode() == CacheMemoryMode.ONHEAP_TIERED ?
                    cache.peekEx(key) : cache.entryEx(key);

                if (e != null && e.deleted()) {
                    // Deleted entries must carry no TTL and must not belong to
                    // primary/backup nodes; re-read until the deletion settles.
                    assertEquals(0, e.ttl());

                    assertFalse("Invalid entry [e=" + e + ", node=" + i + ']',
                        cache.affinity().isPrimaryOrBackup(grid.localNode(), key));

                    continue;
                }

                if (e == null)
                    // Missing entry is fine only on nodes not owning the key.
                    assertTrue("Not found " + key, !cache.affinity().isPrimaryOrBackup(grid.localNode(), key));
                else {
                    // Pull the entry on-heap (it may be swapped) before checking TTL.
                    e.unswap();

                    found = true;

                    // Optionally wait for asynchronous TTL propagation first.
                    if (wait)
                        waitTtl(cache, key, ttl);

                    boolean primary = cache.affinity().isPrimary(grid.localNode(), key);
                    boolean backup = cache.affinity().isBackup(grid.localNode(), key);

                    assertEquals("Unexpected ttl [grid=" + i + ", nodeId=" + grid.getLocalNodeId() +
                        ", key=" + key + ", e=" + e + ", primary=" + primary + ", backup=" + backup + ']', ttl, e.ttl());

                    // A positive TTL implies a concrete expire time; zero TTL implies none.
                    if (ttl > 0)
                        assertTrue(e.expireTime() > 0);
                    else
                        assertEquals(0, e.expireTime());
                }

                break;
            }
            catch (GridCacheEntryRemovedException ignore) {
                info("RETRY");
                // Retry.
            }
            catch (GridDhtInvalidPartitionException ignore) {
                // Partition moved away from this node — nothing to check here.
                break;
            }
        }
    }

    assertTrue(found);
}
/**
 * Waits (up to 3 seconds) until the cache entry for {@code key} reports the
 * expected TTL, retrying on concurrent removal; an invalid-partition
 * exception counts as success (the entry moved off this node).
 *
 * @param cache Cache.
 * @param key Key.
 * @param ttl TTL to wait.
 * @throws IgniteInterruptedCheckedException If wait has been interrupted.
 */
private void waitTtl(final GridCacheAdapter<Object, Object> cache, final Object key, final long ttl)
    throws IgniteInterruptedCheckedException {
    GridTestUtils.waitForCondition(new PAX() {
        @Override public boolean applyx() throws IgniteCheckedException {
            GridCacheEntryEx entry;

            // Retry lookup until we get a stable (non-removed) entry.
            while (true) {
                try {
                    // ONHEAP_TIERED: peek only; otherwise force entry creation.
                    entry = memoryMode() == CacheMemoryMode.ONHEAP_TIERED ?
                        cache.peekEx(key) : cache.entryEx(key);

                    assert entry != null;

                    // Bring swapped entries on-heap before reading the TTL.
                    entry.unswap();

                    return entry.ttl() == ttl;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    // Retry.
                }
                catch (GridDhtInvalidPartitionException ignore) {
                    // Partition no longer on this node — nothing left to wait for.
                    return true;
                }
            }
        }
    }, 3000);
}
/**
 * Entry processor that simply reads and returns the current entry value
 * (used to exercise TTL behavior of invoke-based reads).
 */
private static class GetEntryProcessor implements EntryProcessor<Integer, Integer, Integer> {
    /** {@inheritDoc} */
    @Override public Integer process(MutableEntry<Integer, Integer> e, Object... args) {
        return e.getValue();
    }
}
/**
 * Configurable {@link ExpiryPolicy} for tests: each of the three durations
 * (creation / update / access) may independently be {@code null}, meaning
 * "leave the current TTL unchanged" per the JSR-107 contract.
 */
private static class TestPolicy implements ExpiryPolicy, Serializable {
    /** TTL applied on entry creation, in milliseconds ({@code null} = unchanged). */
    private Long create;

    /** TTL applied on entry access, in milliseconds ({@code null} = unchanged). */
    private Long access;

    /** TTL applied on entry update, in milliseconds ({@code null} = unchanged). */
    private Long update;

    /**
     * NOTE: parameter order is (create, update, access).
     *
     * @param create TTL for creation.
     * @param update TTL for update.
     * @param access TTL for access.
     */
    TestPolicy(@Nullable Long create,
        @Nullable Long update,
        @Nullable Long access) {
        this.create = create;
        this.update = update;
        this.access = access;
    }

    /** {@inheritDoc} */
    @Override public Duration getExpiryForCreation() {
        return create != null ? new Duration(TimeUnit.MILLISECONDS, create) : null;
    }

    /** {@inheritDoc} */
    @Override public Duration getExpiryForAccess() {
        return access != null ? new Duration(TimeUnit.MILLISECONDS, access) : null;
    }

    /** {@inheritDoc} */
    @Override public Duration getExpiryForUpdate() {
        return update != null ? new Duration(TimeUnit.MILLISECONDS, update) : null;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(TestPolicy.class, this);
    }
}
}
| apache-2.0 |
shopizer-ecommerce/shopizer | sm-core/src/main/java/com/salesmanager/core/business/repositories/user/UserRepository.java | 2111 | package com.salesmanager.core.business.repositories.user;
import java.util.List;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import com.salesmanager.core.model.user.User;
/**
 * Spring Data repository for admin {@code User} entities. Every query
 * eagerly fetches the user's groups, merchant store and default language
 * to avoid lazy-initialization issues outside the persistence context.
 */
public interface UserRepository extends JpaRepository<User, Long>, UserRepositoryCustom {

    /** Finds a user by admin name across all stores. */
    @Query("select distinct u from User as u left join fetch u.groups ug join fetch u.merchantStore um left join fetch u.defaultLanguage ul where u.adminName = ?1")
    User findByUserName(String userName);

    /** Finds a user by id, scoped to the store with the given code. */
    @Query("select distinct u from User as u left join fetch u.groups ug join fetch u.merchantStore um left join fetch u.defaultLanguage ul where u.id = ?1 and um.code = ?2")
    User findByUserId(Long userId, String storeCode);

    /** Finds a user by admin name, scoped to the store with the given code. */
    @Query("select distinct u from User as u left join fetch u.groups ug join fetch u.merchantStore um left join fetch u.defaultLanguage ul where u.adminName= ?1 and um.code = ?2")
    User findByUserName(String userName, String storeCode);

    /** Finds a user by id with associations fetched (replacement for the removed JpaRepository findOne). */
    @Query("select distinct u from User as u left join fetch u.groups ug join fetch u.merchantStore um left join fetch u.defaultLanguage ul where u.id = ?1")
    User findOne(Long id);

    /** Lists all users ordered by id, with associations fetched. */
    @Query("select distinct u from User as u left join fetch u.groups ug join fetch u.merchantStore um left join fetch u.defaultLanguage ul order by u.id")
    List<User> findAll();

    /** Lists users belonging to the store with the given numeric id, ordered by id. */
    @Query("select distinct u from User as u left join fetch u.groups ug join fetch u.merchantStore um left join fetch u.defaultLanguage ul where um.id = ?1 order by u.id")
    List<User> findByStore(Integer storeId);

    /** Finds a user by id and store code (same filter as findByUserId). */
    @Query("select distinct u from User as u left join fetch u.groups ug join fetch u.merchantStore um left join fetch u.defaultLanguage ul where u.id= ?1 and um.code = ?2")
    User findByUserAndStore(Long userId, String storeCode);

    /** Finds a user by password-reset token within the store with the given code. */
    @Query("select distinct u from User as u "
        + "left join fetch u.groups ug "
        + "join fetch u.merchantStore um "
        + "left join fetch u.defaultLanguage ul "
        + "where u.credentialsResetRequest.credentialsRequest = ?1 and um.code = ?2 ")
    User findByResetPasswordToken(String token, String store);
}
| apache-2.0 |
josdem/client-chat | chat-xmpp/src/main/java/com/all/chat/xmpp/FacebookChatService.java | 13852 | /**
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2011 Eric Haddad Koenig
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.all.chat.xmpp;
import static com.all.shared.messages.MessEngineConstants.REPORT_USER_ACTION;
import javax.annotation.PreDestroy;
import org.jivesoftware.smack.ConnectionConfiguration;
import org.jivesoftware.smack.SASLAuthentication;
import org.jivesoftware.smackx.packet.VCard;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.all.chat.ChatType;
import com.all.chat.ChatUser;
import com.all.chat.Message;
import com.all.chat.exceptions.ChatException;
import com.all.messengine.MessEngine;
import com.all.shared.model.AllMessage;
import com.all.shared.stats.usage.UserActions;
@Service
/**
 * XMPP chat service for Facebook Chat: configures the Facebook-specific
 * SASL mechanisms, normalizes usernames to the chat.facebook.com domain,
 * and reports sent messages to the stats engine.
 */
public class FacebookChatService extends CommonSmackChatService {

    /** Facebook XMPP host. */
    public static final String FACEBOOK_HOST = "chat.facebook.com";

    /** Standard XMPP client port. */
    public static final int FACEBOOK_PORT = 5222;

    @Autowired
    private MessEngine messengine;

    /** Releases underlying chat resources on bean destruction. */
    @PreDestroy
    public void release() {
        super.releaseResources();
    }

    /**
     * Builds the Smack connection configuration for Facebook, registering
     * X-FACEBOOK-PLATFORM (preferred, index 0) and DIGEST-MD5 (index 1)
     * SASL mechanisms before creating the configuration.
     */
    @Override
    protected ConnectionConfiguration getConnectionConfiguration() {
        /* This handles the X-FACEBOOK-PLATFORM mechanism */
        SASLAuthentication.registerSASLMechanism(SASLXFacebookPlatformMechanism.NAME, SASLXFacebookPlatformMechanism.class);
        SASLAuthentication.supportSASLMechanism(SASLXFacebookPlatformMechanism.NAME, 0);
        /* This handles the DIGEST-MD5 mechanism */
        SASLAuthentication.registerSASLMechanism(SASLFacebookDigestMD5Mechanism.NAME, SASLFacebookDigestMD5Mechanism.class);
        SASLAuthentication.supportSASLMechanism(SASLFacebookDigestMD5Mechanism.NAME, 1);
        return new ConnectionConfiguration(FACEBOOK_HOST, FACEBOOK_PORT);
    }

    /** {@inheritDoc} */
    @Override
    public ChatType getChatType() {
        return ChatType.FACEBOOK;
    }

    /**
     * Loads the user's vCard, appending the @chat.facebook.com suffix to bare
     * usernames so the lookup always uses a full JID.
     */
    @Override
    protected VCard loadUserInfo(String username) {
        if (!username.endsWith(FACEBOOK_HOST)) {
            return super.loadUserInfo(new StringBuilder(username).append("@").append(FACEBOOK_HOST).toString());
        }
        return super.loadUserInfo(username);
    }

    /**
     * Sends a chat message, first reporting a Facebook-chat usage event
     * to the message engine for statistics.
     */
    @Override
    public Message sendMessage(String message, ChatUser recipient) throws ChatException {
        messengine.send(new AllMessage<Integer>(REPORT_USER_ACTION, UserActions.SocialNetworks.FACEBOOK_CHAT_MESSAGE_SENT));
        return super.sendMessage(message, recipient);
    }
}
| apache-2.0 |
googleapis/google-cloud-go | internal/generated/snippets/cloudtasks/apiv2beta2/Client/PauseQueue/main.go | 1412 | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT.
// [START cloudtasks_v2beta2_generated_CloudTasks_PauseQueue_sync]
package main
import (
"context"
cloudtasks "cloud.google.com/go/cloudtasks/apiv2beta2"
taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
)
// main demonstrates pausing a Cloud Tasks queue via the v2beta2 client.
// This is a generated usage snippet; the TODOs mark the spots a caller
// must fill in before the example does real work.
func main() {
	ctx := context.Background()
	// Create the Cloud Tasks client (uses Application Default Credentials).
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer c.Close()

	req := &taskspb.PauseQueueRequest{
		// TODO: Fill request struct fields.
		// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/tasks/v2beta2#PauseQueueRequest.
	}
	// Pause the queue; resp is the updated Queue resource.
	resp, err := c.PauseQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
| apache-2.0 |
iris-dni/iris-frontend | src/form/trustPublishConfirmationValidator.js | 179 | import FIELDS from 'components/TrustPublishConfirmationForm/fields';
import fieldValidator from 'form/fieldValidator';
export default (values) => fieldValidator(FIELDS, values);
| apache-2.0 |
dbeaver/dbeaver | plugins/org.jkiss.dbeaver.ext.postgresql.ui/src/org/jkiss/dbeaver/ext/postgresql/tools/fdw/PostgreFDWConfigWizard.java | 15043 | /*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2022 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.postgresql.tools.fdw;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.Log;
import org.jkiss.dbeaver.ext.postgresql.edit.PostgreForeignTableManager;
import org.jkiss.dbeaver.ext.postgresql.edit.PostgreTableColumnManager;
import org.jkiss.dbeaver.ext.postgresql.model.*;
import org.jkiss.dbeaver.ext.postgresql.model.fdw.FDWConfigDescriptor;
import org.jkiss.dbeaver.model.DBPContextProvider;
import org.jkiss.dbeaver.model.DBPDataSourceContainer;
import org.jkiss.dbeaver.model.DBPEvaluationContext;
import org.jkiss.dbeaver.model.DBUtils;
import org.jkiss.dbeaver.model.edit.DBECommandContext;
import org.jkiss.dbeaver.model.edit.DBEPersistAction;
import org.jkiss.dbeaver.model.exec.DBCExecutionContext;
import org.jkiss.dbeaver.model.exec.DBExecUtils;
import org.jkiss.dbeaver.model.impl.edit.SQLDatabasePersistAction;
import org.jkiss.dbeaver.model.impl.edit.SQLDatabasePersistActionComment;
import org.jkiss.dbeaver.model.impl.sql.edit.SQLObjectEditor;
import org.jkiss.dbeaver.model.navigator.DBNDataSource;
import org.jkiss.dbeaver.model.navigator.DBNDatabaseNode;
import org.jkiss.dbeaver.model.navigator.DBNModel;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.sql.SQLUtils;
import org.jkiss.dbeaver.model.struct.DBSEntity;
import org.jkiss.dbeaver.model.struct.DBSEntityAttribute;
import org.jkiss.dbeaver.model.struct.DBStructUtils;
import org.jkiss.dbeaver.model.virtual.DBVContainer;
import org.jkiss.dbeaver.model.virtual.DBVEntity;
import org.jkiss.dbeaver.model.virtual.DBVEntityForeignKey;
import org.jkiss.dbeaver.model.virtual.DBVModel;
import org.jkiss.dbeaver.runtime.DBWorkbench;
import org.jkiss.dbeaver.runtime.properties.PropertySourceCustom;
import org.jkiss.dbeaver.ui.dialogs.BaseWizard;
import org.jkiss.dbeaver.ui.editors.SimpleCommandContext;
import org.jkiss.utils.CommonUtils;
import java.lang.reflect.InvocationTargetException;
import java.util.*;
class PostgreFDWConfigWizard extends BaseWizard implements DBPContextProvider {
private static final Log log = Log.getLog(PostgreFDWConfigWizard.class);
private PostgreFDWConfigWizardPageInput inputPage;
private PostgreFDWConfigWizardPageConfig configPage;
private PostgreDatabase database;
private List<DBPDataSourceContainer> availableDataSources = null;
private List<DBSEntity> proposedEntities = null;
private List<DBNDatabaseNode> selectedEntities;
private DBPDataSourceContainer selectedDataSource;
private PostgreSchema selectedSchema;
private FDWInfo selectedFDW;
private String fdwServerId;
private PropertySourceCustom fdwPropertySource;
/**
 * Descriptor of a foreign data wrapper known to the wizard: either one
 * already installed in the database or one available from the registry.
 * Exactly one of the two fields is expected to be set.
 */
static class FDWInfo {
    PostgreForeignDataWrapper installedFDW;
    FDWConfigDescriptor fdwDescriptor;

    /** Returns the FDW identifier from whichever source is present. */
    String getId() {
        if (installedFDW != null) {
            return installedFDW.getName();
        }
        return fdwDescriptor.getFdwId();
    }

    /** Returns the FDW description from whichever source is present. */
    String getDescription() {
        if (installedFDW != null) {
            return installedFDW.getDescription();
        }
        return fdwDescriptor.getDescription();
    }
}
/**
 * Creates the FDW configuration wizard for the given database.
 *
 * @param database target PostgreSQL database the wrapper will be installed into.
 */
PostgreFDWConfigWizard(PostgreDatabase database) {
    setWindowTitle("Foreign Data Wrappers configurator");
    this.database = database;
    setNeedsProgressMonitor(true);

    // Holds user-entered FDW server options until script generation.
    this.fdwPropertySource = new PropertySourceCustom();
}
/** @return target database for FDW installation. */
public PostgreDatabase getDatabase() {
    return database;
}

/** @return schema selected to host the foreign tables, or null if not chosen yet. */
public PostgreSchema getSelectedSchema() {
    return selectedSchema;
}

/** Sets the schema that will host the generated foreign tables. */
public void setSelectedSchema(PostgreSchema selectedSchema) {
    this.selectedSchema = selectedSchema;
}

/** @return the foreign data wrapper chosen on the config page, or null. */
public FDWInfo getSelectedFDW() {
    return selectedFDW;
}

/** Sets the foreign data wrapper to install/configure. */
public void setSelectedFDW(FDWInfo selectedFDW) {
    this.selectedFDW = selectedFDW;
}

/** @return identifier for the CREATE SERVER statement. */
public String getFdwServerId() {
    return fdwServerId;
}

/** Sets the identifier used for the CREATE SERVER statement. */
public void setFdwServerId(String fdwServerId) {
    this.fdwServerId = fdwServerId;
}

/** @return mutable property source backing the FDW server OPTIONS(...) clause. */
public PropertySourceCustom getFdwPropertySource() {
    return fdwPropertySource;
}
/**
 * Registers the wizard pages in order: input selection, FDW configuration,
 * and the final confirmation/script page.
 */
@Override
public void addPages() {
    inputPage = new PostgreFDWConfigWizardPageInput(this);
    configPage = new PostgreFDWConfigWizardPageConfig(this);
    addPage(inputPage);
    addPage(configPage);
    addPage(new PostgreFDWConfigWizardPageFinal(this));
    super.addPages();
}
/** @return data sources referenced by virtual FKs; empty until collected. */
public List<DBPDataSourceContainer> getAvailableDataSources() {
    return availableDataSources == null ? Collections.emptyList() : availableDataSources;
}

/** @return entities proposed for foreign-table mapping; empty until collected. */
public List<DBSEntity> getProposedEntities() {
    return proposedEntities == null ? Collections.emptyList() : proposedEntities;
}

/** @return the data source of the currently selected entities, or null. */
public DBPDataSourceContainer getSelectedDataSource() {
    return selectedDataSource;
}

/** @return navigator nodes of the entities chosen by the user (never null). */
public List<DBNDatabaseNode> getSelectedEntities() {
    return selectedEntities == null ? Collections.emptyList() : selectedEntities;
}

/**
 * Stores the user's entity selection. The selected data source is derived
 * from the first entity (all selected entities share one data source).
 */
public void setSelectedEntities(List<DBNDatabaseNode> entities) {
    this.selectedEntities = entities;
    this.selectedDataSource = entities.isEmpty() ? null : entities.get(0).getDataSourceContainer();
}

/** Adds a data source to the available list (user action on the input page). */
public void addAvailableDataSource(DBPDataSourceContainer dataSource) {
    availableDataSources.add(dataSource);
}

/** Removes a data source from the available list (user action on the input page). */
public void removeAvailableDataSource(DBPDataSourceContainer dataSource) {
    availableDataSources.remove(dataSource);
}
/**
 * Lazily collects candidate data sources and entities for FDW mapping by
 * scanning the current connection's virtual model for foreign keys that
 * reference other data sources, plus the global FK reference cache.
 * Results are cached in {@code availableDataSources}/{@code proposedEntities};
 * subsequent calls are no-ops.
 */
void collectAvailableDataSources(DBRProgressMonitor monitor) {
    // Already collected — this method is idempotent.
    if (availableDataSources != null) {
        return;
    }
    // LinkedHashSet: deduplicate while preserving discovery order.
    Set<DBPDataSourceContainer> dataSources = new LinkedHashSet<>();
    Set<DBSEntity> entities = new LinkedHashSet<>();

    DBPDataSourceContainer curDataSource = database.getDataSource().getContainer();

    // Find all virtual connections
    DBVModel vModel = curDataSource.getVirtualModel();
    monitor.beginTask("Check virtual foreign keys", 1);
    collectAvailableDataSources(monitor, vModel, dataSources, entities);
    monitor.done();

    DBNModel navModel = DBWorkbench.getPlatform().getNavigatorModel();

    // Check global FK references cache: FKs declared in OTHER data sources
    // that point at the current one.
    Map<String, List<DBVEntityForeignKey>> grCache = DBVModel.getGlobalReferenceCache();
    monitor.beginTask("Check external references", grCache.size());
    for (Map.Entry<String, List<DBVEntityForeignKey>> grEntry : grCache.entrySet()) {
        DBNDataSource refDataSource = navModel.getDataSourceByPath(
            database.getDataSource().getContainer().getProject(),
            grEntry.getKey());
        if (refDataSource != null && refDataSource.getDataSourceContainer() == curDataSource) {
            try {
                for (DBVEntityForeignKey rfk : grEntry.getValue()) {
                    monitor.subTask("Check " + rfk.getEntity().getFullyQualifiedName(DBPEvaluationContext.UI));
                    DBSEntity refEntity = rfk.getEntity().getRealEntity(monitor);
                    if (refEntity != null) {
                        dataSources.add(refEntity.getDataSource().getContainer());
                        entities.add(refEntity);
                    }
                }
            } catch (DBException e) {
                // Best-effort: an unresolvable reference should not abort collection.
                log.debug("Error getting referenced entity", e);
            }
        }
        monitor.worked(1);
    }
    monitor.done();

    // Check already configured FDW

    // Done
    availableDataSources = new ArrayList<>(dataSources);
    proposedEntities = new ArrayList<>(entities);
}
/**
 * Recursively walks a virtual container tree, accumulating every data source
 * and real entity referenced by a virtual foreign key that points outside
 * the current database's data source.
 *
 * @param vContainer  virtual container to scan (recursed into depth-first).
 * @param dataSources accumulator for referenced data source containers.
 * @param entities    accumulator for referenced real entities.
 */
private void collectAvailableDataSources(DBRProgressMonitor monitor, DBVContainer vContainer, Set<DBPDataSourceContainer> dataSources, Set<DBSEntity> entities) {
    for (DBVContainer childContainer : vContainer.getContainers()) {
        collectAvailableDataSources(monitor, childContainer, dataSources, entities);
    }
    for (DBVEntity vEntity : vContainer.getEntities()) {
        for (DBVEntityForeignKey fk : vEntity.getForeignKeys()) {
            DBPDataSourceContainer dataSource = fk.getAssociatedDataSource();
            // Only FKs that cross data-source boundaries are FDW candidates.
            if (dataSource != database.getDataSource().getContainer()) {
                dataSources.add(dataSource);
                try {
                    entities.add(fk.getAssociatedEntity(monitor));
                } catch (DBException e) {
                    // Best-effort: skip entities that cannot be resolved.
                    log.debug("Error getting referenced entity", e);
                }
            }
        }
    }
}
/**
 * Runs the FDW installation in the wizard's runnable context.
 * Shows an error dialog and keeps the wizard open on failure;
 * returns false silently if the user cancels (InterruptedException).
 *
 * @return true if installation completed, false on error or cancellation.
 */
@Override
public boolean performFinish() {
    try {
        getRunnableContext().run(true, true, monitor -> {
            try {
                installFDW(monitor);
            } catch (Exception e) {
                // Wrap so the outer catch can surface the real cause.
                throw new InvocationTargetException(e);
            }
        });
    } catch (InvocationTargetException e) {
        DBWorkbench.getPlatformUI().showError("Error generating FDW", "Error during FDW script execution", e.getTargetException());
        return false;
    } catch (InterruptedException e) {
        // User cancelled the operation — keep the wizard open without an error.
        return false;
    }
    return true;
}
/**
 * Generates the FDW setup script and executes it against the database's
 * default execution context.
 *
 * @throws DBException if script generation or execution fails.
 */
private void installFDW(DBRProgressMonitor monitor) throws DBException {
    monitor.beginTask("Generate FDW script", 2);
    monitor.subTask("Read actions");
    List<DBEPersistAction> actions = generateScript(monitor);
    monitor.subTask("Execute script");
    DBCExecutionContext context = DBUtils.getDefaultContext(getDatabase(), false);
    DBExecUtils.executeScript(monitor, context, "Install FDW", actions);
}
/**
 * @return the database's default execution context (may trigger connection),
 *         or null if none is available.
 */
@Nullable
@Override
public DBCExecutionContext getExecutionContext() {
    return DBUtils.getDefaultContext(database, true);
}
List<DBEPersistAction> generateScript(DBRProgressMonitor monitor) throws DBException {
PostgreDatabase database = getDatabase();
PostgreDataSource curDataSource = database.getDataSource();
List<DBEPersistAction> actions = new ArrayList<>();
PostgreFDWConfigWizard.FDWInfo selectedFDW = getSelectedFDW();
PropertySourceCustom propertySource = getFdwPropertySource();
Map<String, Object> propValues = propertySource.getPropertiesWithDefaults();
String serverId = getFdwServerId();
actions.add(new SQLDatabasePersistActionComment(curDataSource, "CREATE EXTENSION " + selectedFDW.getId()));
{
StringBuilder script = new StringBuilder();
script.append("CREATE SERVER ").append(serverId)
.append("\n\tFOREIGN DATA WRAPPER ").append(selectedFDW.getId())
.append("\n\tOPTIONS(");
boolean firstProp = true;
for (Map.Entry<String, Object> pe : propValues.entrySet()) {
String propName = CommonUtils.toString(pe.getKey());
String propValue = CommonUtils.toString(pe.getValue());
if (CommonUtils.isEmpty(propName) || CommonUtils.isEmpty(propValue)) {
continue;
}
if (!firstProp) script.append(", ");
script.append(propName).append(" '").append(propValue).append("'");
firstProp = false;
}
script
.append(")");
actions.add(new SQLDatabasePersistAction("Create extension", script.toString()));
}
actions.add(new SQLDatabasePersistAction("CREATE USER MAPPING FOR CURRENT_USER SERVER " + serverId));
// Now tables
DBECommandContext commandContext = new SimpleCommandContext(getExecutionContext(), false);
try {
PostgreFDWConfigWizard.FDWInfo fdwInfo = getSelectedFDW();
Map<String, Object> options = new HashMap<>();
options.put(SQLObjectEditor.OPTION_SKIP_CONFIGURATION, true);
PostgreForeignTableManager tableManager = new PostgreForeignTableManager();
PostgreTableColumnManager columnManager = new PostgreTableColumnManager();
for (DBNDatabaseNode tableNode : getSelectedEntities()) {
DBSEntity entity = (DBSEntity) tableNode.getObject();
PostgreTableForeign pgTable = (PostgreTableForeign) tableManager.createNewObject(monitor, commandContext, getSelectedSchema(), null, options);
if (pgTable == null) {
log.error("Internal error while creating new table");
continue;
}
pgTable.setName(entity.getName());
pgTable.setForeignServerName(serverId);
pgTable.setForeignOptions(new String[0]);
for (DBSEntityAttribute attr : CommonUtils.safeCollection(entity.getAttributes(monitor))) {
// Cache data types
PostgreSchema catalogSchema = database.getCatalogSchema(monitor);
if (catalogSchema != null) {
catalogSchema.getDataTypes(monitor);
}
String defTypeName = DBStructUtils.mapTargetDataType(database, attr, true);
String plainTargetTypeName = SQLUtils.stripColumnTypeModifiers(defTypeName);
PostgreDataType dataType = database.getDataType(monitor, plainTargetTypeName);
if (dataType == null) {
log.error("Data type '" + plainTargetTypeName + "' not found. Skip column mapping.");
continue;
}
PostgreTableColumn newColumn = columnManager.createNewObject(monitor, commandContext, pgTable, null, options);
assert newColumn != null;
newColumn.setName(attr.getName());
newColumn.setDataType(dataType);
}
DBEPersistAction[] tableDDL = tableManager.getTableDDL(monitor, pgTable, options);
Collections.addAll(actions, tableDDL);
}
} finally {
commandContext.resetChanges(true);
}
//CREATE SERVER clickhouse_svr FOREIGN DATA WRAPPER clickhousedb_fdw OPTIONS(dbname 'default', driver '/usr/local/lib/odbc/libclickhouseodbc.so', host '46.101.202.143');
return actions;
}
}
| apache-2.0 |
ansel86castro/Igneel | Samples/ForgeEditor/Testing.cs | 1323 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Igneel;
using Igneel.Effects;
using Igneel.Graphics;
using Igneel.Rendering;
using Igneel.SceneComponents;
using Igneel.Techniques;
namespace ForgeEditor
{
public class IdTechnique : DefaultTechnique
{
public RenderTexture2D RenderTarget { get; set; }
public static void Register()
{
Service.SetFactory<FrameMeshRender<RenderMeshIdEffect>>(new SingletonDisposableFactoryNew<FrameMeshRender<RenderMeshIdEffect>>());
RenderManager.RegisterRender<FrameMesh, IdTechnique, FrameMeshRender<RenderMeshIdEffect>>();
}
public override void Apply()
{
var scene = Engine.Scene;
var device = Engine.Graphics;
scene.UpdateVisibleComponents();
device.SaveRenderTarget();
RenderTarget.SetTarget(device);
device.Clear(ClearFlags.Target | ClearFlags.ZBuffer | ClearFlags.Stencil, new Color4(1, 1, 0, 0), 1, 0);
foreach (var entry in scene.VisibleComponents)
{
var item = entry.UpdateRender();
item.Draw(PixelClipping.None);
}
device.RestoreRenderTarget();
}
}
}
| apache-2.0 |
youkai-app/ProgressView | library/src/main/java/app/youkai/progressview/LongTouchHandler.java | 2516 | package app.youkai.progressview;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewConfiguration;
import java.util.Timer;
import java.util.TimerTask;
/**
 * Touch listener that fires a single increment on tap and auto-repeats
 * {@code incrementBy} increments (with an accelerating schedule) while the
 * view is long-pressed.
 */
class LongTouchHandler implements View.OnTouchListener {
    private IncrementListener incrementListener;
    private Timer timer;
    private int incrementBy;

    LongTouchHandler (IncrementListener incrementListener, int incrementBy) {
        this.incrementListener = incrementListener;
        this.incrementBy = incrementBy;
    }

    @Override
    public boolean onTouch(View v, MotionEvent event) {
        if (v.isClickable()) {
            /* handle press states */
            if (event.getAction() == MotionEvent.ACTION_DOWN) {
                v.setPressed(true);
                timer = new Timer();
                // Initial delay = length of a long press
                timer.schedule(new IncrementTask(0, ViewConfiguration.getLongPressTimeout()), ViewConfiguration.getLongPressTimeout());
            } else if (event.getAction() == MotionEvent.ACTION_UP
                    || event.getAction() == MotionEvent.ACTION_CANCEL) {
                // Stop auto-repeat on release AND on cancellation (e.g. when a parent
                // view intercepts the gesture). Without the ACTION_CANCEL case the
                // timer kept firing forever because no ACTION_UP ever arrived.
                cancelPress();
                v.setPressed(false);
            }
            long lengthOfPress = event.getEventTime() - event.getDownTime();
            // If the button has been "tapped" then handle normally
            if (lengthOfPress < ViewConfiguration.getLongPressTimeout()
                    && event.getAction() == MotionEvent.ACTION_UP) {
                incrementListener.increment();
            }
            return true;
        } else {
            /* If the view isn't clickable, let the touch be handled by others. */
            return false;
        }
    }

    /** Cancels any pending auto-repeat; safe to call before any ACTION_DOWN. */
    void cancelPress() {
        // Explicit null-guard instead of swallowing an NPE: the timer only exists
        // once an ACTION_DOWN has been seen.
        if (timer != null) {
            timer.cancel();
        }
    }

    /** One repeat step; reschedules itself with an exponentially shrinking delay. */
    private class IncrementTask extends TimerTask {
        private int count;
        private final long initialDelay;

        IncrementTask (int count, long initialDelay) {
            this.count = count;
            this.initialDelay = initialDelay;
        }

        @Override
        public void run() {
            incrementListener.incrementBy(incrementBy);
            count++;
            timer.schedule(new IncrementTask(count, initialDelay), (long) (newTimerCoefficient(count) * initialDelay));
        }

        private double newTimerCoefficient (int count) {
            // Slow start, minimum delay will be 0.2 * initialDelay
            return 0.2 + Math.exp(-0.3 * count);
        }
    }
}
| apache-2.0 |
morj/xodus | environment/src/main/java/jetbrains/exodus/tree/btree/LeafNodeDupMutable.java | 4848 | /**
* Copyright 2010 - 2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrains.exodus.tree.btree;
import jetbrains.exodus.ArrayByteIterable;
import jetbrains.exodus.ByteIterable;
import jetbrains.exodus.tree.ITree;
import org.jetbrains.annotations.NotNull;
import java.io.PrintStream;
/**
 * Stateful leaf node with root page of duplicates sub-tree as a value.
 * The node's key is the main-tree key; its "value" is the minimum key of the
 * wrapped duplicates sub-tree ({@link BTreeDupMutable}).
 */
class LeafNodeDupMutable extends BaseLeafNodeMutable {

    // Duplicates sub-tree that holds all values for this key.
    @NotNull
    protected final BTreeDupMutable tree;

    LeafNodeDupMutable(@NotNull final BTreeDupMutable tree) {
        this.tree = tree;
    }

    @Override
    public long getAddress() {
        // Address of the (unsaved or saved) duplicates sub-tree, not of this node itself.
        return tree.address;
    }

    @NotNull
    @Override
    public BTreeBase getTree() {
        return tree;
    }

    @NotNull
    @Override
    public AddressIterator addressIterator() {
        // Iterates addresses of the duplicates sub-tree content.
        final BTreeTraverser traverser = BTreeMutatingTraverser.create(tree);
        return new AddressIterator(null, traverser.currentNode.size > 0, traverser);
    }

    @Override
    public boolean hasValue() {
        // Values live in the duplicates sub-tree, not inline on this node.
        return false;
    }

    @Override
    public boolean isDup() {
        return true;
    }

    @Override
    public long getDupCount() {
        return tree.size;
    }

    @NotNull
    BasePageMutable getRootPage() {
        return tree.getRoot();
    }

    @Override
    public boolean valueExists(@NotNull ByteIterable value) {
        // value is a key in duplicates sub-tree
        return tree.hasKey(value);
    }

    @Override
    public int compareKeyTo(@NotNull final ByteIterable iterable) {
        return tree.key.compareTo(iterable);
    }

    @Override
    public int compareValueTo(@NotNull final ByteIterable iterable) {
        return getValue().compareTo(iterable);
    }

    @Override
    @NotNull
    public ByteIterable getKey() {
        return tree.key;
    }

    @Override
    @NotNull
    public ByteIterable getValue() {
        // By convention the node's single "value" is the smallest duplicate.
        return tree.getRoot().getMinKey().getKey();
    }

    @Override
    public boolean delete(ByteIterable value) {
        // Deletes one duplicate (a key of the sub-tree).
        return tree.delete(value);
    }

    boolean put(@NotNull ByteIterable value) {
        // Duplicates are stored as keys with empty values.
        return tree.put(value, ByteIterable.EMPTY);
    }

    LeafNodeDupMutable putRight(@NotNull ByteIterable value) {
        // Append-optimized insert: value must be >= all existing duplicates.
        tree.putRight(value, ArrayByteIterable.EMPTY);
        return this;
    }

    @Override
    public long save(final ITree mainTree) {
        // The dup sub-tree is bound to one main tree at creation time; saving
        // against a different tree would persist into the wrong log context.
        if (tree.mainTree != mainTree) {
            throw new IllegalArgumentException("Can't save LeafNodeDupMutable against mutable tree " +
                    "different from passed on creation");
        }
        return tree.save();
    }

    @Override
    public String toString() {
        return "LND* {key:" + getKey().toString() + '}';
    }

    @Override
    public void dump(PrintStream out, int level, ToString renderer) {
        super.dump(out, level, renderer);
        tree.getRoot().dump(out, level + 1, renderer);
    }

    /**
     * Convert any leaf to mutable leaf with duplicates support
     *
     * @param ln leaf node to convert
     * @param mainTree its tree
     * @return mutable copy of ln
     */
    static LeafNodeDupMutable convert(@NotNull ILeafNode ln, @NotNull BTreeMutable mainTree) {
        final boolean isLeafNodeDup = ln.isDup();
        if (isLeafNodeDup && ln instanceof LeafNodeDupMutable) {
            // Already mutable with duplicates: nothing to convert.
            return (LeafNodeDupMutable) ln;
        }
        // wrapper tree that doesn't allow duplicates
        final BTreeDupMutable dupTree = isLeafNodeDup ?
                ((LeafNodeDup) ln).getTreeCopyMutable() :
                new BTreeDupMutable(
                        new BTreeEmpty(mainTree.getLog(), mainTree.getBalancePolicy(), false, mainTree.getStructureId()),
                        ln.getKey()
                );
        dupTree.mainTree = mainTree;
        return convert(ln, mainTree, dupTree);
    }

    static LeafNodeDupMutable convert(@NotNull ILeafNode ln, @NotNull BTreeMutable mainTree, @NotNull BTreeDupMutable dupTree) {
        final LeafNodeDupMutable result = new LeafNodeDupMutable(dupTree);
        if (ln.isDup()) {
            return result;
        } else {
            // leaf node with one value -- add it
            mainTree.decrementSize(1); // hack
            result.put(ln.getValue());
            return result;
        }
    }
}
| apache-2.0 |
leleuj/cas | support/cas-server-support-oauth-core-api/src/main/java/org/apereo/cas/ticket/OAuth20TokenSigningAndEncryptionService.java | 1634 | package org.apereo.cas.ticket;
import org.apereo.cas.support.oauth.services.OAuthRegisteredService;
import org.jose4j.jws.AlgorithmIdentifiers;
import org.jose4j.jwt.JwtClaims;
import java.util.Optional;
/**
 * This is {@link OAuth20TokenSigningAndEncryptionService}.
 * Contract for encoding (signing and/or encrypting) and decoding OAuth JWT
 * tokens, honoring the per-registered-service signing/encryption policy.
 *
 * @author Misagh Moayyed
 * @since 6.0.0
 */
public interface OAuth20TokenSigningAndEncryptionService {
    /**
     * Encode the given claims into a token string, signing and/or encrypting
     * according to the registered service's policy.
     *
     * @param service the registered service the token is issued for
     * @param claims  the claims
     * @return the encoded token
     */
    String encode(OAuthRegisteredService service, JwtClaims claims);
    /**
     * Decode a token back into its JWT claims, validating it against the
     * (optionally resolvable) registered service.
     *
     * @param token   the token
     * @param service the service
     * @return the jwt claims
     */
    JwtClaims decode(String token, Optional<OAuthRegisteredService> service);
    /**
     * Gets json web key signing algorithm.
     * Defaults to RSA using SHA-256.
     *
     * @param svc the svc
     * @return the json web key signing algorithm
     */
    default String getJsonWebKeySigningAlgorithm(final OAuthRegisteredService svc) {
        return AlgorithmIdentifiers.RSA_USING_SHA256;
    }
    /**
     * Gets issuer.
     *
     * @return the issuer
     */
    String getIssuer();
    /**
     * Should sign token for service?
     * Defaults to false; implementations override per service policy.
     *
     * @param svc the svc
     * @return the boolean
     */
    default boolean shouldSignToken(final OAuthRegisteredService svc) {
        return false;
    }
    /**
     * Should encrypt token for service?
     * Defaults to false; implementations override per service policy.
     *
     * @param svc the svc
     * @return the boolean
     */
    default boolean shouldEncryptToken(final OAuthRegisteredService svc) {
        return false;
    }
}
| apache-2.0 |
JonDouglas/xamarin-android-tutorials | Notifications/Lollipop/Lollipop/Resources/Resource.Designer.cs | 3422 | #pragma warning disable 1591
//------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
// Runtime Version:4.0.30319.34014
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
[assembly: global::Android.Runtime.ResourceDesignerAttribute("Lollipop.Resource", IsApplication=true)]
namespace Lollipop
{
[System.CodeDom.Compiler.GeneratedCodeAttribute("Xamarin.Android.Build.Tasks", "1.0.0.0")]
public partial class Resource
{
static Resource()
{
global::Android.Runtime.ResourceIdManager.UpdateIdValues();
}
public static void UpdateIdValues()
{
}
public partial class Array
{
// aapt resource value: 0x7f040000
public const int types = 2130968576;
static Array()
{
global::Android.Runtime.ResourceIdManager.UpdateIdValues();
}
private Array()
{
}
}
public partial class Attribute
{
static Attribute()
{
global::Android.Runtime.ResourceIdManager.UpdateIdValues();
}
private Attribute()
{
}
}
public partial class Drawable
{
// aapt resource value: 0x7f020000
public const int Icon = 2130837504;
static Drawable()
{
global::Android.Runtime.ResourceIdManager.UpdateIdValues();
}
private Drawable()
{
}
}
public partial class Id
{
// aapt resource value: 0x7f060001
public const int delay = 2131099649;
// aapt resource value: 0x7f060002
public const int download = 2131099650;
// aapt resource value: 0x7f060000
public const int type = 2131099648;
static Id()
{
global::Android.Runtime.ResourceIdManager.UpdateIdValues();
}
private Id()
{
}
}
public partial class Layout
{
// aapt resource value: 0x7f030000
public const int Main = 2130903040;
static Layout()
{
global::Android.Runtime.ResourceIdManager.UpdateIdValues();
}
private Layout()
{
}
}
public partial class String
{
// aapt resource value: 0x7f050001
public const int ApplicationName = 2131034113;
// aapt resource value: 0x7f050000
public const int Hello = 2131034112;
// aapt resource value: 0x7f050008
public const int delay_label = 2131034120;
// aapt resource value: 0x7f050003
public const int download_complete = 2131034115;
// aapt resource value: 0x7f050005
public const int exception = 2131034117;
// aapt resource value: 0x7f050004
public const int fun = 2131034116;
// aapt resource value: 0x7f050009
public const int notify_button = 2131034121;
// aapt resource value: 0x7f050006
public const int play = 2131034118;
// aapt resource value: 0x7f05000b
public const int public_text = 2131034123;
// aapt resource value: 0x7f05000a
public const int public_title = 2131034122;
// aapt resource value: 0x7f050002
public const int sample = 2131034114;
// aapt resource value: 0x7f050007
public const int type_label = 2131034119;
static String()
{
global::Android.Runtime.ResourceIdManager.UpdateIdValues();
}
private String()
{
}
}
}
}
#pragma warning restore 1591
| apache-2.0 |
KRMAssociatesInc/eHMP | ehmp/product/production/hmp-main/src/main/java/gov/va/cpe/vpr/sync/SyncService.java | 31811 | package gov.va.cpe.vpr.sync;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import static gov.va.cpe.vpr.sync.MessageDestinations.COMMAND_QUEUE;
import static gov.va.cpe.vpr.sync.MessageDestinations.IMPORT_QUEUE;
import static gov.va.cpe.vpr.sync.MessageDestinations.PATIENT_QUEUE;
import static gov.va.cpe.vpr.sync.SyncCommand.IMPORT_CHUNK;
import static gov.va.cpe.vpr.sync.SyncCommand.VPR_UPDATE_COMPLETE;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.COMMAND;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.ERROR_LEVEL;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.PATIENT_DOMAINS_BY_PID;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.PATIENT_ID;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.PATIENT_IDS;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.TIMESTAMP;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.UID;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.VISTA_ID;
import static gov.va.cpe.vpr.sync.SyncMessageConstants.VISTA_LAST_UPDATED;
import static gov.va.hmp.HmpProperties.SERVER_ID;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.gson.JsonObject;
import com.google.gson.reflect.TypeToken;
import gov.va.cpe.vpr.PatientDemographics;
import gov.va.cpe.vpr.UidUtils;
import gov.va.cpe.vpr.dao.IVprSyncErrorDao;
import gov.va.cpe.vpr.dao.IVprSyncStatusDao;
import gov.va.cpe.vpr.pom.IPatientDAO;
import gov.va.cpe.vpr.pom.POMUtils;
import gov.va.cpe.vpr.sync.msg.ClearPatientMessageHandler;
import gov.va.cpe.vpr.sync.msg.ErrorLevel;
import gov.va.cpe.vpr.sync.vista.IVistaOperationalDataDAO;
import gov.va.cpe.vpr.sync.vista.IVistaVprDataExtractEventStreamDAO;
import gov.va.cpe.vpr.sync.vista.VistaDataChunk;
import gov.va.hmp.healthtime.PointInTime;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.jms.InvalidSelectorException;
import javax.jms.JMSException;
import javax.jms.MapMessage;
import javax.jms.Message;
import javax.jms.QueueBrowser;
import javax.jms.Session;
import org.apache.activemq.broker.jmx.QueueViewMBean;
import org.apache.commons.lang.NotImplementedException;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.FacetField;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.core.convert.ConversionService;
import org.springframework.dao.DataAccessResourceFailureException;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageImpl;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import org.springframework.jms.JmsException;
import org.springframework.jms.core.BrowserCallback;
import org.springframework.jms.core.JmsOperations;
import org.springframework.jms.core.MessagePostProcessor;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
/**
 * Class responsible for dispatching sync messages via JMS for processing by JMS listeners.
 */
@Service
public class SyncService implements ISyncService {
    // JMS message-group headers used to serialize processing per patient/domain.
    static final String JMSXGROUP_ID = "JMSXGroupID";
    static final String JMSXGROUP_SEQ = "JMSXGroupSeq";
    static final int AUTO_UPDATE_JMS_PRIORITY = 8; // 0-9, (0-4 are gradations of normal, 5-9 gradations of expedited priority)
    // private static final long PATIENT_QUEUE_IDLE_THRESHOLD = 10000;
    private static Logger log = LoggerFactory.getLogger(SyncService.class);
    // Collaborators injected via the @Autowired setters below.
    private IPatientDAO patientDao;
    private JmsOperations jmsTemplate;
    private ConversionService conversionService;
    private QueueViewMBean vprWorkQueueMBean;
    private QueueViewMBean vprCommandQueueMBean;
    private QueueViewMBean vprDLQMBean;
    private MetricRegistry metrics;
    private IVprSyncStatusDao syncStatusDao;
    private IVprSyncErrorDao errorDao;
    private IVistaVprDataExtractEventStreamDAO eventStreamDAO;
    private IVistaOperationalDataDAO operationalDataService;
    private boolean reindexAllComplete = true;
    private SolrServer solrServer;
    // Data-stream kill switch plus diagnostic context for why it was disabled.
    private boolean dataStreamEnabled = true;
    private String dataStreamDisabledMsg;
    private Exception dataStreamDisabledException;
    private ClearPatientMessageHandler clearPatientMessageHandler;
    @Autowired
    public void setClearPatientMessageHandler(ClearPatientMessageHandler clearPatientMessageHandler) {
        log.debug("ClearPatientMessageHandler was autowired and is: " + ((clearPatientMessageHandler==null)?"null":"not null"));
        this.clearPatientMessageHandler = clearPatientMessageHandler;
    }
    @Autowired
    public void setErrorDao(IVprSyncErrorDao errorDao) {
        this.errorDao = errorDao;
    }
    @Autowired
    public void setSyncStatusDao(IVprSyncStatusDao syncStatusDao) {
        this.syncStatusDao = syncStatusDao;
    }
    public JmsOperations getJmsTemplate() {
        return jmsTemplate;
    }
    @Autowired
    public void setJmsTemplate(JmsOperations jmsTemplate) {
        this.jmsTemplate = jmsTemplate;
    }
    @Autowired
    public void setPatientDao(@Qualifier("jdsPatientDao") IPatientDAO patientDao) {
        this.patientDao = patientDao;
    }
    @Autowired
    public void setConversionService(ConversionService conversionService) {
        this.conversionService = conversionService;
    }
    @Autowired
    public void setVprWorkQueueMBean(QueueViewMBean vprWorkQueueMBean) {
        this.vprWorkQueueMBean = vprWorkQueueMBean;
    }
    @Autowired
    public void setVprCommandQueueMBean(QueueViewMBean vprCommandQueueMBean) {
        this.vprCommandQueueMBean = vprCommandQueueMBean;
    }
    @Autowired
    public void setVprDeadLetterQueueMBean(QueueViewMBean vprDeadLetterQueueMBean) {
        this.vprDLQMBean = vprDeadLetterQueueMBean;
    }
    @Autowired
    public void setMetricRegistry(MetricRegistry metrics) {
        this.metrics = metrics;
    }
    @Autowired
    public void setEventStreamDAO(IVistaVprDataExtractEventStreamDAO eventStreamDAO) {
        this.eventStreamDAO = eventStreamDAO;
    }
    @Autowired
    public void setOperationalDataService(IVistaOperationalDataDAO operationalDataService) {
        this.operationalDataService = operationalDataService;
    }
    /**
     * Returns the depth of the operational import (work) queue, or -1 if the
     * broker MBean cannot be reached.
     */
    @Override
    public long getOperationalImportQueueSize() {
        try {
            long rslt = vprWorkQueueMBean.getQueueSize();
            if (rslt < 0) {
                // NOTE(review): a negative size is treated as corrupted broker stats and
                // the queue is PURGED (messages discarded) before re-reading — confirm
                // this destructive recovery is intentional.
                vprWorkQueueMBean.purge();
                rslt = vprWorkQueueMBean.getQueueSize();
            }
            log.debug("SyncService.getOperationalImportQueueSize: QueueSize: " + rslt);
            return rslt;
        } catch (Exception e) {
            log.warn("Error connecting to work queue : " + e.getMessage());
            return -1;
        }
    }
    /** @return number of patients whose sync is currently in progress. */
    public long getSynchingPatientCount() {
        return syncStatusDao.findAllLoadingPatientStatii().size();
    }
    // S64 MERGE - Relic from the merge - not sure if we need it.
    //@Override
    //public boolean isPatientLoaded(Patient pt) {
    //    SyncStatus stat = syncStatusDao.getForPid(pt.getPid()==null?PidUtils.getPid(pt):pt.getPid());
    //    return stat!=null && stat.getSyncComplete();
    //}
    /**
     * For each patient currently loading, computes how many items are still
     * outstanding (expected totals minus synced counts) per sync status.
     * Rows contain: pid, size (outstanding count), pending (true when some
     * domain has not yet reported an expected total).
     */
    public ArrayList<Map<String, Object>> getPatientQueueSizes() {
        ArrayList<Map<String, Object>> rslt = new ArrayList<Map<String, Object>>();
        List<SyncStatus> statii = syncStatusDao.findAllLoadingPatientStatii();
        for (SyncStatus stat : statii) {
            Map<String, Object> patCollections = patientDao.getSynchedCollectionCounts(stat.getPid());
            Map<String, Integer> statExpectedTotals = stat.getDomainExpectedTotalsForAllSystemIds();
            int size = 0;
            boolean pendingResponses = false;
            for (String key : statExpectedTotals.keySet()) {
                Integer expected = statExpectedTotals.get(key);
                if (expected == null) {
                    // Source system has not answered yet for this domain.
                    pendingResponses = true;
                } else {
                    Integer found = (patCollections == null ? 0 : patCollections.get(key) == null ? 0 : Integer.valueOf(patCollections.get(key).toString()));
                    if (found < expected) {
                        size += (expected - found);
                    }
                }
            }
            if (size > 0 || pendingResponses) {
                Map<String, Object> row = new HashMap<String, Object>();
                row.put("pid", stat.getPid());
                row.put("size", size);
                row.put("pending", pendingResponses);
                rslt.add(row);
            }
        }
        return rslt;
    }
    /**
     * Re-queues a dead-lettered message for another processing attempt and
     * removes its persisted error record. Failures are logged, not rethrown.
     *
     * @param recId the JMS message id of the dead letter
     */
    @Override
    public void redeliverDeadLetter(String recId) {
        try {
            errorDao.deleteByJMSMessageId(recId);
            vprDLQMBean.retryMessage(recId);
            vprDLQMBean.removeMessage(recId);
        } catch (Exception e) {
            log.error("redeliver dead letter", e);
        }
    }
    /**
     * Returns the depth of the command queue, or -1 if the broker MBean is
     * unreachable. Mirrors getOperationalImportQueueSize, including the
     * purge-on-negative recovery (see the review note there).
     */
    @Override
    public long getCommandQueueSize() {
        try {
            long rslt = vprCommandQueueMBean.getQueueSize();
            if (rslt < 0) {
                vprCommandQueueMBean.purge();
                rslt = vprCommandQueueMBean.getQueueSize();
            }
            return rslt;
        } catch (Exception e) {
            log.warn("Error connecting to command queue : " + e.getMessage());
            return -1;
        }
    }
    // Sends a map payload to the command queue, tagging it with the COMMAND property.
    private void sendCommandMsg(final String command, final Map msg) {
        jmsTemplate.convertAndSend(COMMAND_QUEUE, msg, new CommandPostProcessor(command));
    }
    /**
     * Converts one VistA data chunk into a map message and routes it: chunks
     * with a patient id go to the patient queue (grouped per patient); chunks
     * without go to the generic "vpr.import" queue. Send failures are recorded
     * via errorDuringMsg rather than rethrown.
     */
    public void sendImportVistaDataExtractItemMsg(final VistaDataChunk item) {
        Map importMsg;
        try {
            Timer.Context convertContext = metrics.timer(MetricRegistry.name("vpr.convertChunk")).time();
            importMsg = conversionService.convert(item, Map.class);
            convertContext.stop();
        } catch (RuntimeException e) {
            // NOTE(review): catch-and-rethrow is a no-op; presumably a relic of removed
            // error handling — consider deleting the try/catch.
            throw e;
        }
        try {
            // NOTE(review): readyContext is stopped immediately after starting, so the
            // "vpr.readyChunk" timer records ~zero and measures nothing.
            Timer.Context readyContext = metrics.timer(MetricRegistry.name("vpr.readyChunk")).time();
            readyContext.stop();
            Timer.Context sendContext = metrics.timer(MetricRegistry.name("vpr.sendChunk")).time();
            String pid = item.getPatientId();
            if(pid==null) {
                getJmsTemplate().convertAndSend("vpr.import", importMsg, new PatientCommandPostProcessor(IMPORT_CHUNK, item.getPatientId(), item.isBatch()));
            } else {
                // String queueName = "vpr.patient."+pid;
                importMsg.put("pid",pid);
                String queueName = PATIENT_QUEUE;
                getJmsTemplate().convertAndSend(queueName, importMsg, new PatientCommandPostProcessor(IMPORT_CHUNK, item.getPatientId(), item.isBatch()));
            }
            sendContext.stop();
        } catch (Exception e) {
            errorDuringMsg(importMsg, e, ErrorLevel.ERROR);
        }
    }
    /**
     * Puts a previously failed import message back on the import queue;
     * a failed retry is recorded as an error message.
     */
    public void retryMsg(Map msg) {
        try {
            getJmsTemplate().convertAndSend(IMPORT_QUEUE, msg);
        } catch (Exception e) {
            errorDuringMsg(msg, e, ErrorLevel.ERROR);
        }
    }
    /** Queues a reindex command for the given patient (delegates by pid). */
    public void sendReindexPatientMsg(PatientDemographics pt) {
        sendReindexPatientMsg(pt.getPid());
    }
    /** Queues a PATIENT_REINDEX command for the given pid. */
    public void sendReindexPatientMsg(String pid) {
        Map msg = new HashMap();
        msg.put(PATIENT_ID, pid);
        sendCommandMsg(SyncCommand.PATIENT_REINDEX, msg);
    }
    /**
     * Queues a reindex command for every loaded patient, then a
     * PATIENT_REINDEX_ALL_COMPLETE marker command carrying the current time.
     */
    public void sendReindexAllPatientsMsg() {
        List<String> pids = patientDao.listLoadedPatientIds();
        for (String pid : pids) {
            sendReindexPatientMsg(pid);
        }
        Map completeMsg = new HashMap();
        completeMsg.put(TIMESTAMP, System.currentTimeMillis());
        sendCommandMsg(SyncCommand.PATIENT_REINDEX_ALL_COMPLETE, completeMsg);
    }
    /**
     * Clears (unsyncs) a patient identified by demographics object.
     * NOTE(review): the log line null-guards pt, but pt.getPid() on the next
     * line would still NPE for a null pt — confirm callers never pass null.
     */
    public void sendClearPatientMsg(PatientDemographics pt) {
        log.debug("sendClearPatientMsg(pt): Entered method. pt: " + ((pt == null) ? "null" : pt.toJSON()));
        sendClearPatientMsg(pt.getPid());
    }
    /**
     * Synchronously clears all data for the given pid via the clear-patient
     * handler, then deletes the patient's sync status record if present.
     */
    public void sendClearPatientMsg(String pid) {
        log.debug("SyncService.sendClearPatientMsg(pid): Starting unsync of a patient for pid: " + pid);
        clearPatientMessageHandler.clearPatient(pid);
        // Remove any relics of the sync status now.
        //------------------------------------------
        SyncStatus stat = syncStatusDao.findOneByPid(pid);
        if (stat != null) {
            log.debug("sendClearPatientMsg: deleting the sync status for pid: " + pid);
            syncStatusDao.delete(stat);
        }
        else {
            log.debug("sendClearPatientMsg: sync status did not exist for pid: " + pid + " - no need to delete it.");
        }
        log.debug("SyncService.sendClearPatientMsg: End of unsync of a patient for pid: " + pid);
    }
    /** Queues an ITEM_CLEAR command for a single uid. */
    public void sendClearItemMsg(String uid) {
        Map msg = new HashMap();
        msg.put(UID, uid);
        sendCommandMsg(SyncCommand.ITEM_CLEAR, msg);
    }
    /** Clears every loaded patient, one at a time. */
    public void sendClearAllPatientsMsg() {
        List<String> pids = patientDao.listLoadedPatientIds();
        for (String pid : pids) {
            sendClearPatientMsg(pid);
        }
    }
    /**
     * Queues an HDR_IMPORT command for the given patient/division/site.
     */
    @Override
    public void sendHdrPatientImportMsg(String pid, String division, String vistaId) {
        Map<String, Object> msg = new HashMap<>();
        msg.put(SyncMessageConstants.PATIENT_ID,pid);
        msg.put(SyncMessageConstants.DIVISION,division);
        msg.put(SyncMessageConstants.VISTA_ID,vistaId);
        this.sendCommandMsg(SyncCommand.HDR_IMPORT,msg);
    }
    /**
     * Queues a VPR_UPDATE_COMPLETE command summarizing an update pass:
     * which site, which server, the site's last-updated marker, and the
     * affected patients/domains (serialized to CSV/JSON for the message).
     */
    public void sendUpdateVprCompleteMsg(String serverId, String vistaId, String lastUpdate, Map<String, Set<String>> domainsByPatientId) {
        Map msg = new HashMap();
        msg.put(VISTA_ID, vistaId);
        msg.put(SERVER_ID, serverId);
        msg.put(TIMESTAMP, System.currentTimeMillis());
        msg.put(PATIENT_IDS, domainsByPatientId != null ? StringUtils.collectionToCommaDelimitedString(domainsByPatientId.keySet()) : "");
        msg.put(PATIENT_DOMAINS_BY_PID, domainsByPatientId != null ? POMUtils.toJSON(domainsByPatientId) : "");
        msg.put(VISTA_LAST_UPDATED, lastUpdate);
        sendCommandMsg(VPR_UPDATE_COMPLETE, msg);
    }
    /**
     * Purges both the work and command queues, discarding all pending messages.
     * Broker failures are wrapped in an unchecked exception.
     */
    @Override
    public void cancelPendingMessages() {
        try {
            vprWorkQueueMBean.purge();
            vprCommandQueueMBean.purge();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    /**
     * Publishes a warning derived from the given message onto the warning queue,
     * stamping the pid as a selectable JMS property.
     */
    public void warningDuringMsg(final Map msg, String warning) {
        getJmsTemplate().convertAndSend(MessageDestinations.WARNING_QUEUE, SyncMessageUtils.createWarningMessage(msg, warning), new MessagePostProcessor() {
            @Override
            public Message postProcessMessage(Message message) throws JMSException {
                message.setStringProperty(PATIENT_ID, (String) msg.get(PATIENT_ID)); // this enables us to select messages from the queue by pid
                return message;
            }
        });
    }
    /**
     * Dead-letters the given message with error context (pid and error level as
     * selectable JMS properties) and bumps the patient's sync error counter.
     * If even the dead-letter send fails, the failure is only logged.
     */
    public void errorDuringMsg(final Map msg, Throwable t, final String lvl) {
        try {
            getJmsTemplate().convertAndSend(MessageDestinations.DEAD_LETTER_QUEUE, SyncMessageUtils.createErrorMessage(msg, t, lvl), new MessagePostProcessor() {
                @Override
                public Message postProcessMessage(Message message) throws JMSException {
                    message.setStringProperty(PATIENT_ID, (String) msg.get(PATIENT_ID)); // this enables us to select messages from the queue by pid
                    message.setStringProperty(ERROR_LEVEL, lvl);
                    return message;
                }
            });
            Object msgPid = msg.get(SyncMessageConstants.PATIENT_ID);
            if(msgPid!=null) {
                // FIXME: move this into metadata rather than on demographics object
                PatientDemographics pat = patientDao.findByPid(msg.get(SyncMessageConstants.PATIENT_ID).toString());
                if(pat!=null) {
                    pat.incrementSyncErrorCount();
                    patientDao.save(pat);
                }
            }
        } catch (JmsException e) {
            Object pid = msg.get(SyncMessageConstants.PATIENT_ID);
            String pidString = pid == null ? "" : String.valueOf(pid);
            log.error("unable to put error msg in error queue: " + t.getMessage() + "\n" + pidString, e);
        }
    }
    // Flag toggled by the reindex-all workflow; read by status endpoints.
    public void setReindexAllComplete(boolean reindexAllComplete) {
        this.reindexAllComplete = reindexAllComplete;
    }
    public boolean isReindexAllComplete() {
        return reindexAllComplete;
    }
    /**
     * JMS post-processor that stamps the sync command name onto outgoing
     * messages so listeners can route/select on the COMMAND property.
     */
    static class CommandPostProcessor implements MessagePostProcessor {
        protected String command;
        protected CommandPostProcessor(String command) {
            this.command = command;
        }
        public String getCommand() {
            return command;
        }
        @Override
        public Message postProcessMessage(Message message) throws JMSException {
            message.setStringProperty(COMMAND, command);
            return message;
        }
    }
static class PatientCommandPostProcessor extends CommandPostProcessor {
protected String pid;
protected boolean batch;
PatientCommandPostProcessor(String command, String pid) {
this(command, pid, false);
}
PatientCommandPostProcessor(String command, String pid, boolean autoUpdate) {
super(command);
this.pid = pid;
this.batch = batch;
}
@Override
public Message postProcessMessage(Message message) throws JMSException {
Message m = super.postProcessMessage(message);
if (StringUtils.hasText(pid)) {
m.setStringProperty(JMSXGROUP_ID, getPatientMessageGroupId(pid));
m.setStringProperty(PATIENT_ID, pid);
if (!batch) {
m.setJMSPriority(AUTO_UPDATE_JMS_PRIORITY);
}
}
return m;
}
private String getPatientMessageGroupId(String pid) {
return "vpr.pt." + pid;
}
}
    /**
     * JMS post-processor for operational-data messages: adds the command
     * property and a per-domain JMS message group so each operational domain
     * is processed serially by a single consumer.
     */
    static class OperationalDomainCommandPostProcessor extends CommandPostProcessor {
        protected String domain;
        OperationalDomainCommandPostProcessor(String command, String domain) {
            super(command);
            this.domain = domain;
        }
        @Override
        public Message postProcessMessage(Message message) throws JMSException {
            log.debug("SyncService.OperationalDomainCommandPostProcessor.postProcessMessage: Domain: " + domain);
            Message m = super.postProcessMessage(message);
            m.setStringProperty(JMSXGROUP_ID, getOperationalDomainGroupId(domain));
            return m;
        }
        // Message-group key for one operational domain.
        private String getOperationalDomainGroupId(String domain) {
            return "odc.domain." + domain;
        }
    }
/**
 * Browses the VPR command queue and returns one map per queued message,
 * carrying the message's DOMAIN under both the "domain" and "text" keys.
 *
 * @throws DataAccessResourceFailureException if the JMS browse fails
 */
@Override
public List<Map> getCommandQueueDetail() {
List<Map> rslt = new ArrayList<Map>();
try {
List<?> msgs = vprCommandQueueMBean.browseMessages();
for(Object msg: msgs) {
String domain = ((MapMessage)msg).getString(SyncMessageConstants.DOMAIN);
Map<String, Object> rmsg = new HashMap<String, Object>();
// "text" intentionally mirrors "domain" (both hold the same value).
rmsg.put("domain",domain);
rmsg.put("text",domain);
rslt.add(rmsg);
}
} catch (InvalidSelectorException e) {
throw new DataAccessResourceFailureException(e.getMessage(), e);
} catch (JMSException e) {
throw new DataAccessResourceFailureException(e.getMessage(), e);
}
return rslt;
}
/**
 * Returns true if the given error record matches any of the whitespace-
 * separated search terms, optionally restricted to the "-"-separated list
 * of searchable areas (fields) passed to {@code SyncError.match}.
 *
 * @param msg           error record to test
 * @param searchStrings space-separated search terms; null/empty matches nothing
 * @param searchAreas   "-"-separated area names, or null/empty to search all areas
 * @throws IllegalAccessException if field access inside match() fails
 */
private boolean messageMatch(SyncError msg, String searchStrings, String searchAreas) throws IllegalAccessException {
    if (searchStrings == null || searchStrings.isEmpty()) {
        return false;
    }
    // BUG FIX: the original tested searchAreas != "" -- a reference
    // comparison that is almost never false -- so an empty (non-interned)
    // searchAreas produced a bogus one-element area list instead of null.
    String[] areas = (searchAreas != null && !searchAreas.isEmpty()) ? searchAreas.split("-") : null;
    for (String term : searchStrings.split(" ")) {
        if (msg.match(term, areas)) {
            // First matching term wins; no need to test the rest.
            return true;
        }
    }
    return false;
}
/**
 * Returns one page of sync errors, optionally filtered by search terms.
 *
 * @param pageable        page window (offset + page size)
 * @param includeWarnings NOTE(review): accepted but never referenced in this
 *                        method body -- confirm whether filtering by severity
 *                        was intended here
 * @param searchStrings   space-separated search terms; null/empty disables filtering
 * @param searchAreas     "-"-separated area names passed through to messageMatch
 * @throws IllegalArgumentException if the requested offset is past the end
 */
@Override
public Page<SyncError> findAllErrors(final Pageable pageable, final Boolean includeWarnings, String searchStrings, String searchAreas) {
int total = pageable.getOffset();
int max = total + pageable.getPageSize();
List<SyncError> emsgs;
List<SyncError> rslt = new ArrayList<>();
emsgs = errorDao.getAllSyncErrors();
// Apply the search filter (if any) before paging, so page numbers refer
// to positions within the filtered list.
if(searchStrings!=null && !searchStrings.isEmpty()) {
List<SyncError> filteredList = new ArrayList<>();
for(SyncError err: emsgs) {
try {
if(messageMatch(err, searchStrings, searchAreas)) {
filteredList.add(err);
}
} catch (IllegalAccessException e) {
e.printStackTrace();
log.error("Error checking field values for match string: "+e.getMessage(), e);
}
}
emsgs = filteredList;
}
if(total>emsgs.size())
{
// TODO: Recover from a case where the page is affected by different search criteria. Reset to last page possible.
throw new IllegalArgumentException("Requested offset " + pageable.getOffset() + " is greater than the number of error messages (" + emsgs.size() + ")");
}
// Copy at most one page's worth of entries starting at the offset.
for (int i = total; (rslt.size()+total) < max && emsgs.size()>i; i++) {
rslt.add(emsgs.get(i));
}
return new PageImpl<>(rslt, pageable, emsgs.size());
}
/** Convenience overload: searches all areas (searchAreas == null). */
@Override
public Page<SyncError> findAllErrors(Pageable pageable, Boolean includeWarnings, String searchString) {
return findAllErrors(pageable, includeWarnings, searchString, null);
}
/** @return number of sync errors recorded for the given patient id. */
@Override
public Integer getPatientErrorCount(String pid) {
return errorDao.getErrorCountForPid(pid);
}
/** @return number of distinct patients that have at least one sync error. */
@Override
public Integer getNumPatientsWithErrors() {
return errorDao.getErrorPatientCount();
}
/**
 * Returns one page of the given patient's sync errors.
 *
 * @throws IllegalArgumentException if the requested offset is past the end
 */
@Override
public Page<SyncError> findAllErrorsByPatientId(final String pid, final Pageable pageable) {
List<SyncError> emsgs = errorDao.getAllSyncErrorsForPid(pid);
int total = pageable.getOffset();
int max = total + pageable.getPageSize();
if(total>emsgs.size())
{
throw new IllegalArgumentException("Requested offset " + pageable.getOffset() + " is greater than the number of error messages for patient '" + pid + "' (" + emsgs.size() + ")");
}
// grab a page worth's of MapMessages and convert them to SyncError objects
List<SyncError> messages = new ArrayList<SyncError>(pageable.getPageSize());
for (int i = total; i < max; i++) {
// Guard against a final, partially filled page.
if (emsgs.size()>i) {
messages.add(emsgs.get(i));
}
}
return new PageImpl<SyncError>(messages, pageable, emsgs.size());
}
/**
 * Removes every dead-letter-queue message belonging to the given patient.
 * First counts the matching messages via a browse, then consumes exactly
 * that many with the same selector.
 *
 * @return the number of messages removed
 */
@Override
public int deleteErrorByPatientId(String pid) {
String selector = getPatientIdSelector(pid);
int num = jmsTemplate.browseSelected(MessageDestinations.DEAD_LETTER_QUEUE, selector, new BrowserCallback<Integer>() {
@Override
public Integer doInJms(Session session, QueueBrowser browser) throws JMSException {
// Count matches without consuming them.
return Collections.list(browser.getEnumeration()).size();
}
});
// NOTE(review): messages enqueued between the browse and the receives
// could be consumed beyond the counted set -- confirm this is acceptable.
for (int i = 0; i < num; i++) {
jmsTemplate.receiveSelected(MessageDestinations.DEAD_LETTER_QUEUE, selector);
}
return num;
}
/**
 * Builds a JMS message selector matching all messages for the given patient.
 * Single quotes in the pid are doubled per the JMS selector grammar so a
 * quote inside the id cannot break out of (or inject into) the selector.
 */
private String getPatientIdSelector(String pid) {
    return SyncMessageConstants.PATIENT_ID + "='" + pid.replace("'", "''") + "'";
}
/** @return the sync error with the given JMS message id, from the error store. */
@Override
public SyncError findOneError(String id) {
return errorDao.getOneByJMSMessageId(id);
}
/** @return total number of recorded sync errors. */
@Override
public long getErrorCount() {
return errorDao.getSyncErrorCount();
}
/**
 * Removes one error by JMS message id: consumes the matching dead-letter
 * message, then deletes the persisted record.
 */
@Override
public void deleteError(String id) {
// NOTE(review): id is embedded unescaped in the selector -- confirm JMS
// message ids can never contain a single quote.
jmsTemplate.receiveSelected(MessageDestinations.DEAD_LETTER_QUEUE, "JMSMessageID='" + id + "'");
errorDao.deleteByJMSMessageId(id);
}
/** Convenience overload that deletes by the record's own id. */
@Override
public void deleteError(SyncError err) {
deleteError(err.getId());
}
/**
 * Purges both the dead-letter queue and the persisted error store.
 * Any failure is rethrown as an unchecked exception.
 */
@Override
public void deleteAllErrors() {
try {
vprDLQMBean.purge();
errorDao.purge();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/** Not supported; use the paged overloads instead. */
@Override
public List<SyncError> findAllErrors() {
throw new NotImplementedException();
}
/** Not supported; use the paged overloads instead. */
@Override
public List<SyncError> findAllErrors(Sort sort) {
throw new NotImplementedException();
}
/** Paged listing with warnings included and no search filter. */
@Override
public Page<SyncError> findAllErrors(Pageable pageable) {
return this.findAllErrors(pageable, true, null);
}
/**
 * Subscribes a patient by demographics: resolves the patient's local id for
 * the given VistA system and delegates to the (id, pid) overload. A null
 * patient is logged and ignored.
 */
@Override
public void subscribePatient(String vistaId, PatientDemographics pat) {
log.debug("SyncService.subscribePatient(vistaId, pat): Entered method. vistaId: " + vistaId + "; pat: " +
((pat == null) ? "null" : pat.toJSON()));
if (pat != null) {
// NOTE(review): the local patient id is passed where the two-arg
// overload expects vistaId -- confirm this argument order is intended.
subscribePatient(pat.getLocalPatientIdForSystem(vistaId), pat.getPid());
}
else {
log.debug("SyncService.subscribePatient(vistaId, pat): pat was null. vistaId: " + vistaId + ". No subscription was done.");
}
}
/** Subscribes the patient to the event stream for the given VistA system. */
@Override
public void subscribePatient(String vistaId, String pid) {
log.debug("SyncService.subscribePatient(vistaId, pid): Entered method. vistaId: " + vistaId + "; pid: " + pid);
eventStreamDAO.subscribePatient(vistaId, pid, true);
}
/** Subscribes the patient using MVI correspondence data and an EDIPI. */
@Override
public void subscribePatient(JsonObject mvi, String pid, String edipi) {
log.debug("SyncService.subscribePatient(mvi,pid): Entered method. mvi: " + mvi.toString() + "; pid: " + pid + "; edipi: " + edipi);
eventStreamDAO.subscribePatient(mvi, pid, edipi);
}
/** Subscribes the patient for an explicit set of sites with a sync priority. */
@Override
public void subscribePatient(String prioritySelect, List<String> sitesToSync, String pid) {
log.debug("SyncService.subscribePatient(prioritySelect, sitesToSync, pid): Entered method. prioritySelect: " + prioritySelect + "; sitesToSync: " + sitesToSync.toString() + "; pid: " + pid);
eventStreamDAO.subscribePatient(prioritySelect, sitesToSync, pid);
}
/** Subscribes the given VistA system for operational (non-patient) data sync. */
@Override
public void subscribeOperational(String vistaId) {
log.debug("SyncService.subscribeOperational: vistaId: " + vistaId);
operationalDataService.subscribe(vistaId);
}
/**
 * @return true while operational (non-patient) data is still being synced,
 *         i.e. when no operational sync status exists yet or the status is
 *         not marked complete; false once the operational sync finished.
 */
@Override
public boolean isOperationalSynching() {
    log.debug("SyncService.isOperationalSynching: Entering method: ");
    SyncStatus stat = syncStatusDao.findOneForOperational();
    if (stat == null || !stat.getSyncOperationalComplete()) {
        // FIX: the original log claimed "(stat was null)" even when stat was
        // merely incomplete, and it said "returning: false" while actually
        // returning true; both messages now match the real outcome.
        log.debug("SyncService.isOperationalSynching: returning: true (status missing or operational sync incomplete)");
        return true;
    }
    // FIX: this log previously printed !stat.getSyncComplete(), a value
    // unrelated to the constant false returned here.
    log.debug("SyncService.isOperationalSynching: returning: false");
    return false;
}
/** Resets the given VistA server's operational-data subscriptions. */
@Override
public void resetServerSubscriptions(String vistaId) {
operationalDataService.resetServerSubscriptions(vistaId);
}
/** @return the operational (non-patient) sync status record, or null if none. */
@Override
public SyncStatus getOperationalSyncStatus() {
log.debug("SyncService.getOperationalSyncStatus: Entering method: ");
return syncStatusDao.findOneForOperational();
}
/** @return the sync status record for the given patient, or null if none. */
@Override
public SyncStatus getPatientSyncStatus(String pid) {
return syncStatusDao.findOneByPid(pid);
}
/** @return true when no sync status exists for the patient at all. */
@Override
public boolean isNotLoadedAndNotLoading(String pid) {
return syncStatusDao.findOneByPid(pid) == null;
}
/** Spring-injected SOLR client used for index/JDS patient-count comparison. */
@Autowired
public void setSolrServer(SolrServer solrServer){
this.solrServer = solrServer;
}
// Counts connection-refused retries made by getIndexAndJdsPatientCounts.
int solrServerInitRetry = 0;
/**
 * Returns the number of distinct patients indexed in SOLR alongside the
 * number of patients in JDS, keyed "solrPidCount" / "jdsPidCount".
 * <p>
 * If the (embedded) SOLR server refuses the connection, the query is
 * retried up to 5 times with a 2-second pause to ride out slow start-up.
 *
 * @return the two counts, or null if the thread was interrupted while waiting
 * @throws SolrServerException if the query fails for any non-start-up reason
 *                             or the retry budget is exhausted
 */
@Override
public Map<String, Integer> getIndexAndJdsPatientCounts() throws SolrServerException {
    SolrQuery qry = new SolrQuery("*:*");
    qry.setRows(0);
    qry.addFacetField("pid");
    qry.setFacetLimit(-1); // facet over every pid (default limit is 100)
    // FIX: the retry budget is now per call; the original kept the counter
    // in a field that was never reset, so after five retries (ever) the
    // method stopped retrying for the life of the service.
    solrServerInitRetry = 0;
    while (true) {
        try {
            QueryResponse resp = solrServer.query(qry);
            FacetField ff = resp.getFacetField("pid");
            int solrPidCount = ff.getValues().size();
            int jdsPidCount = patientDao.count();
            Map<String, Integer> rslt = new HashMap<>();
            rslt.put("solrPidCount", solrPidCount);
            rslt.put("jdsPidCount", jdsPidCount);
            return rslt;
        } catch (SolrServerException e) {
            // Short-term handling while the embedded SolrServer may still be
            // starting up; the long-term plan is a dedicated SolrServer.
            String msg = e.getMessage();
            boolean refused = msg != null && msg.toLowerCase().contains("server refused connection");
            if (!refused || solrServerInitRetry++ >= 5) {
                // FIX: other failures (and exhausted retries) were silently
                // swallowed into a null return; rethrow them instead.
                throw e;
            }
            log.warn("SOLR server refused connection when trying to get PID count; Retry #" + solrServerInitRetry);
            try {
                Thread.sleep(2000);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt(); // preserve interrupt status
                return null;
            }
        }
    }
}
/**
 * Enables or disables the incoming data stream, recording the reason and
 * the triggering exception (if any) for later inspection.
 */
@Override
public void setDataStreamEnabled(boolean b, String disabledMsg, Exception disabledException) {
this.dataStreamEnabled = b;
this.dataStreamDisabledMsg = disabledMsg;
this.dataStreamDisabledException = disabledException;
}
/** @return whether the incoming data stream is currently enabled. */
@Override
public boolean isDataStreamEnabled() {
return dataStreamEnabled;
}
/** @return the recorded disable reason ("disabledMsg") and exception ("disabledException"). */
@Override
public Map<String, Object> getDataStreamErrorDetails() {
Map<String, Object> rslt = new HashMap<>();
rslt.put("disabledMsg",dataStreamDisabledMsg);
rslt.put("disabledException",dataStreamDisabledException);
return rslt;
}
/** Marks the given patient's data from one VistA site as expired at the given time. */
@Override
public void expireSite(String pid, String vistaId, PointInTime time) {
eventStreamDAO.expireSite(pid, vistaId, time);
}
}
| apache-2.0 |
gatling/gatling | src/docs/content/reference/current/mqtt/code/MqttSampleScala.scala | 3801 | import io.gatling.core.Predef._
import scala.concurrent.duration._
//#imprts
import io.gatling.mqtt.Predef._
//#imprts
// Documentation sample: MQTT protocol configuration, actions and checks.
// The //#...// markers delimit snippets included in the published docs.
class MqttSampleScala {
//#protocol
val mqttProtocol = mqtt
// enable protocol version 3.1 (default: false)
.mqttVersion_3_1
// enable protocol version 3.1.1 (default: true)
.mqttVersion_3_1_1
// broker address (default: localhost:1883)
.broker("hostname", 1883)
// if TLS should be enabled (default: false)
.useTls(true)
// Used to specify KeyManagerFactory for each individual virtual user. Input is the 0-based incremental id of the virtual user.
.perUserKeyManagerFactory(userId => null.asInstanceOf[javax.net.ssl.KeyManagerFactory])
// clientIdentifier sent in the connect payload (if not set, Gatling will generate a random one)
.clientId("#{id}")
// if session should be cleaned during connect (default: true)
.cleanSession(true)
// optional credentials for connecting
.credentials("#{userName}", "#{password}")
// connection keep-alive timeout
.keepAlive(30)
// use at-most-once QoS (default: true)
.qosAtMostOnce
// use at-least-once QoS (default: false)
.qosAtLeastOnce
// use exactly-once QoS (default: false)
.qosExactlyOnce
// enable retain (default: false)
.retain(false)
// send last will, possibly with specific QoS and retain
.lastWill(
LastWill("#{willTopic}", StringBody("#{willMessage}"))
.qosAtLeastOnce
.retain(true)
)
// max number of reconnects after connection crash (default: 3)
.reconnectAttemptsMax(1)
// reconnect delay after connection crash in millis (default: 100)
.reconnectDelay(1)
// reconnect delay exponential backoff (default: 1.5)
.reconnectBackoffMultiplier(1.5F)
// resend delay after send failure in millis (default: 5000)
.resendDelay(1000)
// resend delay exponential backoff (default: 1.0)
.resendBackoffMultiplier(2.0F)
// interval for timeout checker (default: 1 second)
.timeoutCheckInterval(1)
// check for pairing messages sent and messages received
.correlateBy(null)
//#protocol
//#connect
mqtt("Connecting").connect
//#connect
//#subscribe
mqtt("Subscribing")
.subscribe("#{myTopic}")
// optional, override default QoS
.qosAtMostOnce
//#subscribe
//#publish
mqtt("Publishing")
.publish("#{myTopic}")
.message(StringBody("#{myTextPayload}"))
//#publish
//#check
// subscribe and expect to receive a message within 100ms, without blocking the flow
mqtt("Subscribing").subscribe("#{myTopic2}")
.expect(100.milliseconds)
// publish and await (block) until it receives a message within 100ms
mqtt("Publishing").publish("#{myTopic}").message(StringBody("#{myPayload}"))
.await(100.milliseconds)
// optionally, define in which topic the expected message will be received
mqtt("Publishing").publish("#{myTopic}").message(StringBody("#{myPayload}"))
.await(100.milliseconds, "repub/#{myTopic}")
// optionally define check criteria to be applied to the matching received message
mqtt("Publishing")
.publish("#{myTopic}").message(StringBody("#{myPayload}"))
.await(100.milliseconds).check(jsonPath("$.error").notExists)
//#check
//#waitForMessages
exec(waitForMessages.timeout(100.milliseconds))
//#waitForMessages
//#sample
class MqttSample extends Simulation {
val mqttProtocol = mqtt
.broker("localhost", 1883)
.correlateBy(jsonPath("$.correlationId"))
val scn = scenario("MQTT Test")
.feed(csv("topics-and-payloads.csv"))
.exec(mqtt("Connecting").connect)
.exec(mqtt("Subscribing").subscribe("#{myTopic}"))
.exec(mqtt("Publishing").publish("#{myTopic}")
.message(StringBody("#{myTextPayload}"))
.expect(100.milliseconds).check(jsonPath("$.error").notExists))
setUp(scn.inject(rampUsersPerSec(10) to 1000 during (60)))
.protocols(mqttProtocol)
}
//#sample
}
| apache-2.0 |
primecloud-controller-org/pcc-java-sdk | src/main/java/jp/primecloud/auto/sdk/client/loadbalancer/DescribeLoadBalancer.java | 1593 | /*
* Copyright 2016 SCSK Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jp.primecloud.auto.sdk.client.loadbalancer;
import java.util.LinkedHashMap;
import java.util.Map;
import jp.primecloud.auto.sdk.JacksonUtils;
import jp.primecloud.auto.sdk.Requester;
import jp.primecloud.auto.sdk.model.loadbalancer.LoadBalancer;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
/**
 * API client call that fetches the details of a single load balancer and
 * maps the response's "LoadBalancer" element onto the model object.
 */
public class DescribeLoadBalancer {

    protected Requester requester;

    public DescribeLoadBalancer(Requester requester) {
        this.requester = requester;
    }

    /**
     * Invokes /DescribeLoadBalancer for the given load balancer number.
     *
     * @param loadBalancerNo identifier of the load balancer to describe
     * @return the deserialized load balancer model
     */
    public LoadBalancer execute(Long loadBalancerNo) {
        Map<String, String> params = new LinkedHashMap<String, String>();
        params.put("LoadBalancerNo", loadBalancerNo.toString());

        JsonNode response = requester.execute("/DescribeLoadBalancer", params);
        JsonNode lbNode = JacksonUtils.getField(response, "LoadBalancer");

        return JacksonUtils.toObject(lbNode, new TypeReference<LoadBalancer>() {
        });
    }
}
| apache-2.0 |
wakandan/wire | wire-runtime/src/test/java/com/squareup/wire/protos/edgecases/NoFields.java | 814 | // Code generated by Wire protocol buffer compiler, do not edit.
// Source file: ../wire-runtime/src/test/proto/edge_cases.proto
package com.squareup.wire.protos.edgecases;
import com.squareup.wire.Message;
/**
 * Wire-generated message type that declares no fields. All instances carry
 * identical (empty) state, so equality is purely a type check and the hash
 * code is a constant.
 */
public final class NoFields extends Message {
private static final long serialVersionUID = 0L;
public NoFields() {
}
private NoFields(Builder builder) {
setBuilder(builder);
}
@Override
public boolean equals(Object other) {
// Any two NoFields instances are equal: there is no state to compare.
return other instanceof NoFields;
}
@Override
public int hashCode() {
return 0;
}
/** Builder for {@link NoFields}; nothing to configure. */
public static final class Builder extends Message.Builder<NoFields> {
public Builder() {
}
public Builder(NoFields message) {
super(message);
}
@Override
public NoFields build() {
return new NoFields(this);
}
}
}
| apache-2.0 |
levymoreira/griffon | subprojects/griffon-pivot/src/test/java/griffon/pivot/support/adapters/TextInputSelectionAdapterTest.java | 1558 | /*
* Copyright 2008-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package griffon.pivot.support.adapters;
import griffon.core.CallableWithArgs;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * Verifies that TextInputSelectionAdapter only invokes the selectionChanged
 * callback once one has been registered.
 */
public class TextInputSelectionAdapterTest {
    private TextInputSelectionAdapter adapter = new TextInputSelectionAdapter();

    @Test
    public void testSelectionChanged() {
        // Records whether the callback has fired (array so the anonymous
        // class can mutate it).
        final boolean[] fired = {false};
        CallableWithArgs<Void> listener = new CallableWithArgs<Void>() {
            public Void call(Object... args) {
                fired[0] = true;
                return null;
            }
        };

        // No callback registered yet: dispatching the event must be a no-op.
        assertNull(adapter.getSelectionChanged());
        adapter.selectionChanged(null, 0, 0);
        assertFalse(fired[0]);

        // Once registered, dispatching the event must invoke the callback.
        adapter.setSelectionChanged(listener);
        adapter.selectionChanged(null, 0, 0);
        assertTrue(fired[0]);
    }
}
| apache-2.0 |
graniet/operative-framework | modules/pictures/exif.go | 4729 | package pictures
import (
"encoding/json"
"os"
"github.com/graniet/go-pretty/table"
"github.com/graniet/operative-framework/session"
"github.com/rwcarlsen/goexif/exif"
"github.com/rwcarlsen/goexif/mknote"
)
// PictureExifModule is the "picture.exif" operative-framework module: it
// extracts EXIF metadata from a target picture and renders it as a table.
type PictureExifModule struct {
session.SessionModule
sess *session.Session `json:"-"`
Stream *session.Stream `json:"-"`
}
// Exif mirrors the JSON produced by goexif's (*exif.Exif).MarshalJSON for
// the attributes this module displays.
type Exif struct {
BitsPerSample []int `json:"BitsPerSample"`
ColorSpace []int `json:"ColorSpace"`
DateTime string `json:"DateTime"`
ExifIFDPointer []int `json:"ExifIFDPointer"`
ExifVersion string `json:"ExifVersion"`
ImageLength []int `json:"ImageLength"`
ImageWidth []int `json:"ImageWidth"`
Orientation []int `json:"Orientation"`
PhotometricInterpretation []int `json:"PhotometricInterpretation"`
PixelXDimension []int `json:"PixelXDimension"`
PixelYDimension []int `json:"PixelYDimension"`
ResolutionUnit []int `json:"ResolutionUnit"`
SamplesPerPixel []int `json:"SamplesPerPixel"`
Software string `json:"Software"`
ThumbJPEGInterchangeFormat []int `json:"ThumbJPEGInterchangeFormat"`
ThumbJPEGInterchangeFormatLength []int `json:"ThumbJPEGInterchangeFormatLength"`
XResolution []string `json:"XResolution"`
YResolution []string `json:"YResolution"`
}
// PushPictureExifModule wires a new picture.exif module into the given
// session and registers its parameters (required TARGET, optional limit).
func PushPictureExifModule(s *session.Session) *PictureExifModule {
	module := &PictureExifModule{
		sess:   s,
		Stream: &s.Stream,
	}
	module.CreateNewParam("TARGET", "Target file", "", true, session.STRING)
	module.CreateNewParam("limit", "Limit search", "10", false, session.STRING)
	return module
}
// Name returns the identifier used to invoke this module.
func (module *PictureExifModule) Name() string {
return "picture.exif"
}
// Description returns a short human-readable summary of the module.
func (module *PictureExifModule) Description() string {
return "View exif data on selected picture"
}
// Author returns the module author's name.
func (module *PictureExifModule) Author() string {
return "Tristan Granier"
}
// GetType declares the target types this module accepts (file targets only).
func (module *PictureExifModule) GetType() []string {
return []string{
session.T_TARGET_FILE,
}
}
// GetInformation bundles the module metadata and parameters for listings.
func (module *PictureExifModule) GetInformation() session.ModuleInformation {
information := session.ModuleInformation{
Name: module.Name(),
Description: module.Description(),
Author: module.Author(),
Type: module.GetType(),
Parameters: module.Parameters,
}
return information
}
// Start runs the module: it resolves the TARGET parameter to a session
// target, opens the picture file, decodes its EXIF metadata, renders the
// attributes as a table and saves a few key fields (date/time, EXIF
// version, software) as a result on the target. Errors are reported to the
// session stream and abort the run.
func (module *PictureExifModule) Start() {
	paramTarget, _ := module.GetParameter("TARGET")
	target, err := module.sess.GetTarget(paramTarget.Value)
	if err != nil {
		module.sess.Stream.Error(err.Error())
		return
	}

	fname := target.GetName()
	f, err := os.Open(fname)
	if err != nil {
		module.sess.Stream.Error(err.Error())
		return
	}
	// FIX: the original never closed the file handle; make sure it is
	// released on every exit path.
	defer f.Close()

	// Optionally register camera makenote data parsing - currently Nikon and
	// Canon are supported.
	exif.RegisterParsers(mknote.All...)

	x, err := exif.Decode(f)
	if err != nil {
		module.sess.Stream.Error(err.Error())
		return
	}

	// Round-trip through JSON to project the decoded tags onto our Exif
	// struct; a marshal failure surfaces as an unmarshal error below.
	raw, _ := x.MarshalJSON()
	var e Exif
	if err := json.Unmarshal(raw, &e); err != nil {
		module.sess.Stream.Error(err.Error())
		return
	}

	t := module.Stream.GenerateTable()
	t.SetOutputMirror(os.Stdout)
	// One table row per attribute (labels kept byte-identical to the
	// original output, including their inconsistent trailing colons).
	rows := []table.Row{
		{"ThumbJPEGInterchangeFormatLength", e.ThumbJPEGInterchangeFormatLength},
		{"XResolution", e.XResolution},
		{"YResolution", e.YResolution},
		{"ResolutionUnit", e.ResolutionUnit},
		{"ExifVersion", e.ExifVersion},
		{"ColorSpace", e.ColorSpace},
		{"PixelXDimension", e.PixelXDimension},
		{"PixelYDimension", e.PixelYDimension},
		{"ImageWidth", e.ImageWidth},
		{"ImageLength:", e.ImageLength},
		{"PhotometricInterpretation", e.PhotometricInterpretation},
		{"Software", e.Software},
		{"DateTime", e.DateTime},
		{"SamplesPerPixel", e.SamplesPerPixel},
		{"ExifIFDPointer:", e.ExifIFDPointer},
		{"ThumbJPEGInterchangeFormat", e.ThumbJPEGInterchangeFormat},
		{"BitsPerSample:", e.BitsPerSample},
	}
	for _, row := range rows {
		t.AppendRow(row)
	}

	result := target.NewResult()
	result.Set("Date Time", e.DateTime)
	result.Set("Exif Version", e.ExifVersion)
	result.Set("Software", e.Software)
	result.Save(module, target)

	module.sess.Stream.Render(t)
}
| apache-2.0 |
jexp/idea2 | platform/lang-impl/src/com/intellij/psi/impl/cache/impl/VfsIndexer.java | 3665 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi.impl.cache.impl;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.vfs.VirtualFileFilter;
import com.intellij.psi.PsiBundle;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
/**
 * Serializes a virtual-file-system subtree to a stream and reads such an
 * index back against a live VFS root. Only file names and child counts are
 * written, in pre-order; writer and reader must use the same
 * {@link VirtualFileFilter} so their traversal orders match.
 */
public class VfsIndexer {
private static final Logger LOG = Logger.getInstance("#com.intellij.psi.impl.cache.impl.VfsIndexer");
/**
 * Writes the filtered index of {@code root} to {@code stream} (a null
 * stream means "collect only") and returns every visited file in pre-order.
 */
public static VirtualFile[] writeFileIndex(
OutputStream stream,
VirtualFile root,
VirtualFileFilter filter) throws IOException {
List<VirtualFile> result = new ArrayList<VirtualFile>();
ProgressIndicator progress = ProgressManager.getInstance().getProgressIndicator();
if (progress != null){
progress.setText2(PsiBundle.message("psi.scanning.files.in.folder.progress", root.getPresentableUrl()));
}
DataOutputStream out = stream == null ? null : new DataOutputStream(stream);
_writeFileIndex(out, root, filter, result);
if (out != null){
out.flush();
}
return result.toArray(new VirtualFile[result.size()]);
}
// Recursively writes "name, filtered child count, children..." for one node.
private static void _writeFileIndex(DataOutputStream out, VirtualFile file, VirtualFileFilter filter, List<VirtualFile> result) throws IOException {
ProgressManager.getInstance().checkCanceled();
result.add(file);
if (out != null){
out.writeUTF(file.getName());
}
VirtualFile[] children = file.getChildren();
if (children == null) {
// Leaf (or unreadable) node: record zero children.
if (out != null){
out.writeInt(0);
}
return;
}
// The count must reflect only accepted children, since only those are
// recursed into (and later read back).
int childrenCount = 0;
for (VirtualFile child : children) {
if (filter.accept(child)) childrenCount++;
}
if (out != null){
out.writeInt(childrenCount);
}
for (VirtualFile child : children) {
if (filter.accept(child)) {
_writeFileIndex(out, child, filter, result);
}
}
}
/**
 * Reads an index previously produced by {@link #writeFileIndex} and resolves
 * each entry against {@code root}. Entries that no longer exist (or are now
 * rejected by the filter) appear as nulls in the returned array.
 */
public static VirtualFile[] readFileIndex(
InputStream stream,
VirtualFile root,
VirtualFileFilter filter
) throws IOException {
List<VirtualFile> result = new ArrayList<VirtualFile>();
DataInputStream in = new DataInputStream(stream);
String rootName = in.readUTF();
LOG.assertTrue(root.getName().equals(rootName));
_readFileIndex(in, root, filter, result);
return result.toArray(new VirtualFile[result.size()]);
}
// Recursively reads one node; file may be null when an ancestor vanished,
// in which case the subtree is still consumed to keep the stream in sync.
private static void _readFileIndex(DataInputStream in, VirtualFile file, VirtualFileFilter filter, List<VirtualFile> result) throws IOException {
ProgressManager.getInstance().checkCanceled();
result.add(file);
int childrenCount = in.readInt();
if (childrenCount == 0) return;
for (int i = 0; i < childrenCount; i++) {
String name = in.readUTF();
VirtualFile child = file != null ? file.findChild(name) : null;
if (child != null && !filter.accept(child)){
child = null;
}
_readFileIndex(in, child, filter, result);
}
}
}
| apache-2.0 |
jk1/intellij-community | python/src/com/jetbrains/python/console/PydevConsoleRunnerImpl.java | 40617 | // Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.jetbrains.python.console;
import com.google.common.collect.Lists;
import com.intellij.execution.ExecutionException;
import com.intellij.execution.ExecutionHelper;
import com.intellij.execution.ExecutionManager;
import com.intellij.execution.Executor;
import com.intellij.execution.configurations.EncodingEnvironmentUtil;
import com.intellij.execution.configurations.GeneralCommandLine;
import com.intellij.execution.configurations.ParamsGroup;
import com.intellij.execution.configurations.PtyCommandLine;
import com.intellij.execution.console.ConsoleExecuteAction;
import com.intellij.execution.console.ConsoleHistoryController;
import com.intellij.execution.console.LanguageConsoleView;
import com.intellij.execution.executors.DefaultRunExecutor;
import com.intellij.execution.process.ProcessAdapter;
import com.intellij.execution.process.ProcessEvent;
import com.intellij.execution.process.ProcessOutputTypes;
import com.intellij.execution.process.ProcessTerminatedListener;
import com.intellij.execution.runners.ConsoleTitleGen;
import com.intellij.execution.ui.RunContentDescriptor;
import com.intellij.execution.ui.actions.CloseAction;
import com.intellij.icons.AllIcons;
import com.intellij.ide.CommonActionsManager;
import com.intellij.ide.errorTreeView.NewErrorTreeViewPanel;
import com.intellij.idea.ActionsBundle;
import com.intellij.openapi.actionSystem.*;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.application.TransactionGuard;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.editor.Caret;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.actionSystem.EditorAction;
import com.intellij.openapi.editor.actionSystem.EditorWriteActionHandler;
import com.intellij.openapi.editor.actions.SplitLineAction;
import com.intellij.openapi.fileEditor.FileDocumentManager;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.progress.Task;
import com.intellij.openapi.project.DumbAware;
import com.intellij.openapi.project.DumbAwareAction;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.ui.Messages;
import com.intellij.openapi.util.Couple;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.io.StreamUtil;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.CharsetToolkit;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.wm.ToolWindow;
import com.intellij.psi.PsiFile;
import com.intellij.testFramework.LightVirtualFile;
import com.intellij.ui.GuiUtils;
import com.intellij.ui.JBColor;
import com.intellij.ui.SideBorder;
import com.intellij.ui.content.Content;
import com.intellij.util.ArrayUtil;
import com.intellij.util.Consumer;
import com.intellij.util.PathMappingSettings;
import com.intellij.util.TimeoutUtil;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.net.NetUtils;
import com.intellij.util.ui.MessageCategory;
import com.intellij.util.ui.UIUtil;
import com.intellij.xdebugger.XDebugProcess;
import com.intellij.xdebugger.XDebugProcessStarter;
import com.intellij.xdebugger.XDebugSession;
import com.intellij.xdebugger.XDebuggerManager;
import com.jetbrains.python.PythonHelper;
import com.jetbrains.python.console.actions.ShowVarsAction;
import com.jetbrains.python.console.pydev.ConsoleCommunicationListener;
import com.jetbrains.python.debugger.PyDebugRunner;
import com.jetbrains.python.debugger.PyDebugValue;
import com.jetbrains.python.debugger.PyVariableViewSettings;
import com.jetbrains.python.debugger.settings.PyDebuggerSettings;
import com.jetbrains.python.remote.PyRemotePathMapper;
import com.jetbrains.python.remote.PyRemoteSdkAdditionalDataBase;
import com.jetbrains.python.remote.PythonRemoteInterpreterManager;
import com.jetbrains.python.run.PythonCommandLineState;
import com.jetbrains.python.run.PythonRunParams;
import com.jetbrains.python.run.PythonTracebackFilter;
import com.jetbrains.python.sdk.PySdkUtil;
import icons.PythonIcons;
import org.apache.xmlrpc.XmlRpcException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.stream.Collectors;
import static com.intellij.execution.runners.AbstractConsoleRunnerWithHistory.registerActionShortcuts;
/**
* @author traff, oleg
*/
public class PydevConsoleRunnerImpl implements PydevConsoleRunner {
public static final String WORKING_DIR_AND_PYTHON_PATHS = "WORKING_DIR_AND_PYTHON_PATHS";
public static final String CONSOLE_START_COMMAND = "import sys; print('Python %s on %s' % (sys.version, sys.platform))\n" +
"sys.path.extend([" + WORKING_DIR_AND_PYTHON_PATHS + "])\n";
public static final String STARTED_BY_RUNNER = "startedByRunner";
private static final Logger LOG = Logger.getInstance(PydevConsoleRunnerImpl.class.getName());
@SuppressWarnings("SpellCheckingInspection")
public static final String PYDEV_PYDEVCONSOLE_PY = "pydev/pydevconsole.py";
public static final int PORTS_WAITING_TIMEOUT = 20000;
private final Project myProject;
private final String myTitle;
@Nullable private final String myWorkingDir;
private final Consumer<String> myRerunAction;
@NotNull private final Sdk mySdk;
protected int[] myPorts;
private PydevConsoleCommunication myPydevConsoleCommunication;
private PyConsoleProcessHandler myProcessHandler;
protected PythonConsoleExecuteActionHandler myConsoleExecuteActionHandler;
private final List<ConsoleListener> myConsoleListeners = ContainerUtil.createLockFreeCopyOnWriteList();
private final PyConsoleType myConsoleType;
@NotNull private final Map<String, String> myEnvironmentVariables;
private String myCommandLine;
@NotNull protected final PyConsoleOptions.PyConsoleSettings myConsoleSettings;
private final String[] myStatementsToExecute;
private static final long HANDSHAKE_TIMEOUT = 60000;
private RemoteConsoleProcessData myRemoteConsoleProcessData;
private String myConsoleTitle = null;
private PythonConsoleView myConsoleView;
public PydevConsoleRunnerImpl(@NotNull final Project project,
@NotNull Sdk sdk,
@NotNull final PyConsoleType consoleType,
@Nullable final String workingDir,
@NotNull Map<String, String> environmentVariables,
@NotNull PyConsoleOptions.PyConsoleSettings settingsProvider,
@NotNull Consumer<String> rerunAction, String... statementsToExecute) {
myProject = project;
mySdk = sdk;
myTitle = consoleType.getTitle();
myWorkingDir = workingDir;
myConsoleType = consoleType;
myEnvironmentVariables = environmentVariables;
myConsoleSettings = settingsProvider;
myStatementsToExecute = statementsToExecute;
myRerunAction = rerunAction;
}
/** Overrides the auto-generated console title; must be set before the content descriptor is created. */
public void setConsoleTitle(String consoleTitle) {
myConsoleTitle = consoleTitle;
}
/**
 * Populates the console toolbar and returns the subset of actions whose keyboard
 * shortcuts should be registered on the console component.
 * Note: the rerun action is intentionally added twice — once to the toolbar (top slot)
 * and once at index 0 of the returned list so its shortcut gets registered too.
 */
private List<AnAction> fillToolBarActions(final DefaultActionGroup toolbarActions,
                                          final RunContentDescriptor contentDescriptor) {
//toolbarActions.add(backspaceHandlingAction);
toolbarActions.add(createRerunAction());
List<AnAction> actions = ContainerUtil.newArrayList();
//stop
actions.add(createStopAction());
//close
actions.add(createCloseAction(contentDescriptor));
// run action
actions.add(
new ConsoleExecuteAction(myConsoleView, myConsoleExecuteActionHandler, myConsoleExecuteActionHandler.getEmptyExecuteAction(),
myConsoleExecuteActionHandler));
// Help
actions.add(CommonActionsManager.getInstance().createHelpAction("interactive_console"));
actions.add(new SoftWrapAction());
toolbarActions.addAll(actions);
// Actions added below this point get shortcuts registered but are NOT shown in the toolbar
// (except those explicitly added to toolbarActions).
actions.add(0, createRerunAction());
actions.add(PyConsoleUtil.createInterruptAction(myConsoleView));
actions.add(PyConsoleUtil.createTabCompletionAction(myConsoleView));
actions.add(createSplitLineAction());
toolbarActions.add(new ShowVarsAction(myConsoleView, myPydevConsoleCommunication));
toolbarActions.add(ConsoleHistoryController.getController(myConsoleView).getBrowseHistory());
toolbarActions.add(new ConnectDebuggerAction());
DefaultActionGroup settings = new DefaultActionGroup("Settings", true);
settings.getTemplatePresentation().setIcon(AllIcons.General.GearPlain);
settings.add(new PyVariableViewSettings.SimplifiedView(null));
settings.add(new PyVariableViewSettings.VariablesPolicyGroup());
toolbarActions.add(settings);
toolbarActions.add(new NewConsoleAction());
return actions;
}
@Override
public void open() {
    final PythonConsoleToolWindow consoleToolWindow = PythonConsoleToolWindow.getInstance(myProject);
    // No initialized console tool window yet: start a fresh console synchronously.
    if (consoleToolWindow == null || !consoleToolWindow.isInitialized()) {
        runSync(true);
        return;
    }
    // A console already exists — just bring its tool window to the front.
    consoleToolWindow.getToolWindow().activate(() -> {
    }, true);
}
/**
 * Starts the console process on the calling thread, then connects to it in a
 * background task. Errors starting the process are rendered in an error console.
 */
@Override
public void runSync(boolean requestEditorFocus) {
myPorts = findAvailablePorts(myProject, myConsoleType);
assert myPorts != null;
GeneralCommandLine generalCommandLine = createCommandLine(mySdk, myEnvironmentVariables, myWorkingDir, myPorts);
myCommandLine = generalCommandLine.getCommandLineString();
try {
initAndRun(generalCommandLine);
// Handshake/connect can take a while — do it off the calling thread.
ProgressManager.getInstance().run(new Task.Backgroundable(myProject, "Connecting to Console", false) {
@Override
public void run(@NotNull final ProgressIndicator indicator) {
indicator.setText("Connecting to console...");
connect(myStatementsToExecute);
if (requestEditorFocus) {
myConsoleView.requestFocus();
}
}
});
}
catch (ExecutionException e) {
LOG.warn("Error running console", e);
showErrorsInConsole(e);
}
}
/**
 * Asynchronous variant of {@link #runSync(boolean)}: saves all documents first,
 * then starts the process and connects inside a background task scheduled on the EDT.
 */
@Override
public void run(boolean requestEditorFocus) {
// Flush unsaved editor changes so the console sees current file contents.
TransactionGuard.submitTransaction(myProject, () -> FileDocumentManager.getInstance().saveAllDocuments());
myPorts = findAvailablePorts(myProject, myConsoleType);
assert myPorts != null;
GeneralCommandLine generalCommandLine = createCommandLine(mySdk, myEnvironmentVariables, myWorkingDir, myPorts);
myCommandLine = generalCommandLine.getCommandLineString();
UIUtil
.invokeLaterIfNeeded(() -> ProgressManager.getInstance().run(new Task.Backgroundable(myProject, "Connecting to Console", false) {
@Override
public void run(@NotNull final ProgressIndicator indicator) {
indicator.setText("Connecting to console...");
try {
initAndRun(generalCommandLine);
connect(myStatementsToExecute);
if (requestEditorFocus) {
myConsoleView.requestFocus();
}
}
catch (final Exception e) {
LOG.warn("Error running console", e);
// Error UI must be built on the EDT.
UIUtil.invokeAndWaitIfNeeded((Runnable)() -> showErrorsInConsole(e));
}
}
}));
}
/**
 * Replaces the console UI with an error tree panel showing the exception message
 * (line by line) plus rerun/close actions so the user can retry.
 */
private void showErrorsInConsole(Exception e) {
DefaultActionGroup actionGroup = new DefaultActionGroup(createRerunAction());
final ActionToolbar actionToolbar = ActionManager.getInstance().createActionToolbar("PydevConsoleRunnerErrors",
actionGroup, false);
// Runner creating
final JPanel panel = new JPanel(new BorderLayout());
panel.add(actionToolbar.getComponent(), BorderLayout.WEST);
NewErrorTreeViewPanel errorViewPanel = new NewErrorTreeViewPanel(myProject, null, false, false, null);
String[] messages = StringUtil.isNotEmpty(e.getMessage()) ? StringUtil.splitByLines(e.getMessage()) : ArrayUtil.EMPTY_STRING_ARRAY;
if (messages.length == 0) {
messages = new String[]{"Unknown error"};
}
errorViewPanel.addMessage(MessageCategory.ERROR, messages, null, -1, -1, null);
panel.add(errorViewPanel, BorderLayout.CENTER);
final RunContentDescriptor contentDescriptor =
new RunContentDescriptor(null, myProcessHandler, panel, "Error running console");
actionGroup.add(createCloseAction(contentDescriptor));
showContentDescriptor(contentDescriptor);
}
/**
 * Shows the given run content either inside the Python console tool window
 * (when available) or as regular run content.
 */
protected void showContentDescriptor(RunContentDescriptor contentDescriptor) {
    final ToolWindow window = PythonConsoleToolWindow.getToolWindow(myProject);
    if (window == null) {
        // No dedicated console tool window: fall back to the generic Run tool window.
        ExecutionManager
            .getInstance(myProject).getContentManager().showRunContent(getExecutor(), contentDescriptor);
        return;
    }
    window.getComponent().putClientProperty(STARTED_BY_RUNNER, "true");
    PythonConsoleToolWindow.getInstance(myProject).init(window, contentDescriptor);
}
/** Console content is always shown under the default "Run" executor. */
private static Executor getExecutor() {
return DefaultRunExecutor.getRunExecutorInstance();
}
/**
 * Picks two free local ports for the console's XML-RPC channels
 * (pydevconsole.py expects both on its command line).
 * Returns null (after showing the error) if no ports could be allocated.
 */
@Nullable
public static int[] findAvailablePorts(Project project, PyConsoleType consoleType) {
final int[] ports;
try {
// File "pydev/console/pydevconsole.py", line 223, in <module>
// port, client_port = sys.argv[1:3]
ports = NetUtils.findAvailableSocketPorts(2);
}
catch (IOException e) {
ExecutionHelper.showErrors(project, Collections.<Exception>singletonList(e), consoleType.getTitle(), null);
return null;
}
return ports;
}
/** Builds the command line for the console process; overridable hook, delegates to doCreateConsoleCmdLine. */
protected GeneralCommandLine createCommandLine(@NotNull final Sdk sdk,
                                               @NotNull final Map<String, String> environmentVariables,
                                               @Nullable String workingDir,
                                               @NotNull int[] ports) {
return doCreateConsoleCmdLine(sdk, environmentVariables, workingDir, ports);
}
/** Overridable factory for the run parameters used to build the console command line. */
protected PythonConsoleRunParams createConsoleRunParams(@Nullable String workingDir,
                                                        @NotNull Sdk sdk,
                                                        @NotNull Map<String, String> environmentVariables) {
return new PythonConsoleRunParams(myConsoleSettings, workingDir, sdk, environmentVariables);
}
/**
 * Assembles the full console command line: python interpreter (with optional
 * interpreter options), the pydevconsole helper script, and the two port arguments.
 */
@NotNull
protected GeneralCommandLine doCreateConsoleCmdLine(@NotNull Sdk sdk,
                                                    @NotNull Map<String, String> environmentVariables,
                                                    @Nullable String workingDir,
                                                    @NotNull int[] ports) {
final PythonConsoleRunParams runParams = createConsoleRunParams(workingDir, sdk, environmentVariables);
// PTY is avoided on Windows — note the workingDir parameter is ignored here in favor of myWorkingDir below.
GeneralCommandLine cmd =
PythonCommandLineState.createPythonCommandLine(myProject, runParams, false,
PtyCommandLine.isEnabled() && !SystemInfo.isWindows);
cmd.withWorkDirectory(myWorkingDir);
ParamsGroup exeGroup = cmd.getParametersList().getParamsGroup(PythonCommandLineState.GROUP_EXE_OPTIONS);
if (exeGroup != null && !runParams.getInterpreterOptions().isEmpty()) {
exeGroup.addParametersString(runParams.getInterpreterOptions());
}
ParamsGroup group = cmd.getParametersList().getParamsGroup(PythonCommandLineState.GROUP_SCRIPT);
if (group == null) {
group = cmd.getParametersList().addParamsGroup(PythonCommandLineState.GROUP_SCRIPT);
}
PythonHelper.CONSOLE.addToGroup(group, cmd);
// pydevconsole.py reads its two communication ports from argv.
for (int port : ports) {
group.addParameter(String.valueOf(port));
}
return cmd;
}
/** Creates the console view, binds it to the communication channel and adds traceback hyperlinks. */
private PythonConsoleView createConsoleView() {
PythonConsoleView consoleView = new PythonConsoleView(myProject, myTitle, mySdk, false);
myPydevConsoleCommunication.setConsoleFile(consoleView.getVirtualFile());
consoleView.addMessageFilter(new PythonTracebackFilter(myProject));
return consoleView;
}
/**
 * Starts the console backend process (remote or local) and initializes
 * myPydevConsoleCommunication / myCommandLine as side effects.
 */
@NotNull
private Process createProcess(@NotNull GeneralCommandLine generalCommandLine) throws ExecutionException {
if (PySdkUtil.isRemote(mySdk)) {
PythonRemoteInterpreterManager manager = PythonRemoteInterpreterManager.getInstance();
PyRemoteSdkAdditionalDataBase data = (PyRemoteSdkAdditionalDataBase)mySdk.getSdkAdditionalData();
final PyRemotePathMapper pathMapper = PydevConsoleRunner.getPathMapper(myProject, mySdk, myConsoleSettings);
if (manager != null && data != null && pathMapper != null) {
RemoteConsoleProcessData remoteConsoleProcessData =
PythonConsoleRemoteProcessCreatorKt.createRemoteConsoleProcess(generalCommandLine,
pathMapper,
myProject, data, getRunnerFileFromHelpers(),
myPorts[0], myPorts[1]);
myRemoteConsoleProcessData = remoteConsoleProcessData;
myCommandLine = remoteConsoleProcessData.getCommandLine();
myPydevConsoleCommunication = remoteConsoleProcessData.getPydevConsoleCommunication();
return remoteConsoleProcessData.getProcess();
}
// Remote SDK without remote-interpreter support available.
throw new PythonRemoteInterpreterManager.PyRemoteInterpreterExecutionException();
}
else {
myCommandLine = generalCommandLine.getCommandLineString();
Map<String, String> envs = generalCommandLine.getEnvironment();
EncodingEnvironmentUtil.setLocaleEnvironmentIfMac(envs, generalCommandLine.getCharset());
final Process server = generalCommandLine.createProcess();
try {
myPydevConsoleCommunication = new PydevConsoleCommunication(myProject, myPorts[0], server, myPorts[1]);
}
catch (Exception e) {
throw new ExecutionException(e.getMessage());
}
return server;
}
}
/** Helpers-relative path of the backend script; overridable for alternative console backends. */
protected String getRunnerFileFromHelpers() {
return PYDEV_PYDEVCONSOLE_PY;
}
/** Reads the two ports a remote console process prints on stdout, in order. */
public static Couple<Integer> getRemotePortsFromProcess(Process process) throws ExecutionException {
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed") Scanner s = new Scanner(process.getInputStream());
return Couple.of(readInt(s, process), readInt(s, process));
}
/**
 * Reads the next integer line from the process output, skipping (and later
 * warning about) any non-numeric lines, waiting up to PORTS_WAITING_TIMEOUT ms.
 *
 * @throws ExecutionException if the process exits with an error, exits without
 *                            printing an integer, or the timeout elapses
 */
private static int readInt(Scanner s, Process process) throws ExecutionException {
    long started = System.currentTimeMillis();
    StringBuilder sb = new StringBuilder();
    boolean sawGarbage = false;
    while (System.currentTimeMillis() - started < PORTS_WAITING_TIMEOUT) {
        if (s.hasNextLine()) {
            String line = s.nextLine();
            sb.append(line).append("\n");
            try {
                int i = Integer.parseInt(line);
                if (sawGarbage) {
                    LOG.warn("Unexpected strings in output:\n" + sb.toString());
                }
                return i;
            }
            catch (NumberFormatException ignored) {
                sawGarbage = true;
                continue;
            }
        }
        TimeoutUtil.sleep(200);
        // BUGFIX: Process.exitValue() throws IllegalThreadStateException while the
        // process is still running (possible when stdout was closed early) — keep
        // waiting in that case instead of propagating the unchecked exception.
        final int exitCode;
        try {
            exitCode = process.exitValue();
        }
        catch (IllegalThreadStateException stillRunning) {
            continue;
        }
        if (exitCode != 0) {
            String error;
            try {
                error = "Console process terminated with error:\n" + StreamUtil.readText(process.getErrorStream()) + sb.toString();
            }
            catch (Exception ignored) {
                error = "Console process terminated with exit code " + exitCode + ", output:" + sb.toString();
            }
            throw new ExecutionException(error);
        }
        else {
            // Clean exit without having printed an integer — give up.
            break;
        }
    }
    throw new ExecutionException("Couldn't read integer value from stream");
}
/**
 * Wraps the started process in a console process handler (remote-aware).
 * Stores the handler in myProcessHandler and returns it.
 */
private PyConsoleProcessHandler createProcessHandler(final Process process) {
if (PySdkUtil.isRemote(mySdk)) {
PythonRemoteInterpreterManager manager = PythonRemoteInterpreterManager.getInstance();
if (manager != null) {
PyRemoteSdkAdditionalDataBase data = (PyRemoteSdkAdditionalDataBase)mySdk.getSdkAdditionalData();
assert data != null;
myProcessHandler =
manager.createConsoleProcessHandler(process, myConsoleView, myPydevConsoleCommunication,
myCommandLine, CharsetToolkit.UTF8_CHARSET,
manager.setupMappings(myProject, data, null),
myRemoteConsoleProcessData.getSocketProvider());
}
else {
// NOTE: myProcessHandler stays null in this branch; createProcess() would
// already have thrown for an unsupported remote SDK, so this is not expected.
LOG.error("Can't create remote console process handler");
}
}
else {
myProcessHandler = new PyConsoleProcessHandler(process, myConsoleView, myPydevConsoleCommunication, myCommandLine,
CharsetToolkit.UTF8_CHARSET);
}
return myProcessHandler;
}
/**
 * Starts the console process, then (on the EDT) builds the console view,
 * process handler and content descriptor, and starts pumping process output.
 */
private void initAndRun(@NotNull GeneralCommandLine generalCommandLine) throws ExecutionException {
// Create Server process
final Process process = createProcess(generalCommandLine);
UIUtil.invokeLaterIfNeeded(() -> {
// Init console view
myConsoleView = createConsoleView();
if (myConsoleView != null) {
myConsoleView.setBorder(new SideBorder(JBColor.border(), SideBorder.LEFT));
}
myPydevConsoleCommunication.setConsoleView(myConsoleView);
myProcessHandler = createProcessHandler(process);
myConsoleExecuteActionHandler = createExecuteActionHandler();
ProcessTerminatedListener.attach(myProcessHandler);
PythonConsoleView consoleView = myConsoleView;
// Make the console read-only once the backend process dies.
myProcessHandler.addProcessListener(new ProcessAdapter() {
@Override
public void processTerminated(@NotNull ProcessEvent event) {
consoleView.setEditable(false);
}
});
// Attach to process
myConsoleView.attachToProcess(myProcessHandler);
createContentDescriptorAndActions();
// Run
myProcessHandler.startNotify();
});
}
/**
 * Builds the console panel (toolbar + console view), generates a unique console
 * title if none was set, registers toolbar action shortcuts and shows the content.
 */
protected void createContentDescriptorAndActions() {
// Runner creating
final DefaultActionGroup toolbarActions = new DefaultActionGroup();
final ActionToolbar actionToolbar = ActionManager.getInstance().createActionToolbar("PydevConsoleRunner", toolbarActions, false);
// Runner creating
final JPanel panel = new JPanel(new BorderLayout());
panel.add(actionToolbar.getComponent(), BorderLayout.WEST);
panel.add(myConsoleView.getComponent(), BorderLayout.CENTER);
actionToolbar.setTargetComponent(panel);
if (myConsoleTitle == null) {
// Derive a unique title ("Python Console", "Python Console (1)", ...) from the
// titles already present in the console tool window.
myConsoleTitle = new ConsoleTitleGen(myProject, myTitle) {
@NotNull
@Override
protected List<String> getActiveConsoles(@NotNull String consoleTitle) {
PythonConsoleToolWindow toolWindow = PythonConsoleToolWindow.getInstance(myProject);
if (toolWindow != null && toolWindow.isInitialized() && toolWindow.getToolWindow() != null) {
return Lists.newArrayList(toolWindow.getToolWindow().getContentManager().getContents()).stream().map(c -> c.getDisplayName())
.collect(
Collectors.toList());
}
else {
return super.getActiveConsoles(consoleTitle);
}
}
}.makeTitle();
}
final RunContentDescriptor contentDescriptor =
new RunContentDescriptor(myConsoleView, myProcessHandler, panel, myConsoleTitle, null);
Disposer.register(myProject, contentDescriptor);
contentDescriptor.setFocusComputable(() -> myConsoleView.getConsoleEditor().getContentComponent());
contentDescriptor.setAutoFocusContent(true);
// tool bar actions
final List<AnAction> actions = fillToolBarActions(toolbarActions, contentDescriptor);
registerActionShortcuts(actions, myConsoleView.getConsoleEditor().getComponent());
registerActionShortcuts(actions, panel);
getConsoleView().addConsoleFolding(false, false);
showContentDescriptor(contentDescriptor);
}
/**
 * Performs the XML-RPC handshake with the console process. On success, wires the
 * console view to the communication channel and executes the initial statements;
 * on failure, reports the error and kills the process.
 */
private void connect(final String[] statements2execute) {
if (handshake()) {
ApplicationManager.getApplication().invokeLater(() -> {
// Propagate console communication to language console
final PythonConsoleView consoleView = myConsoleView;
consoleView.setConsoleCommunication(myPydevConsoleCommunication);
consoleView.setSdk(mySdk);
consoleView.setExecutionHandler(myConsoleExecuteActionHandler);
myProcessHandler.addProcessListener(new ProcessAdapter() {
@Override
public void onTextAvailable(@NotNull ProcessEvent event, @NotNull Key outputType) {
consoleView.print(event.getText(), outputType);
}
});
enableConsoleExecuteAction();
// A single empty statement is replaced with a tab — presumably to force the
// backend to emit its first prompt; verify against pydevconsole behavior.
if (statements2execute.length == 1 && statements2execute[0].isEmpty()) {
statements2execute[0] = "\t";
}
for (String statement : statements2execute) {
consoleView.executeStatement(statement + "\n", ProcessOutputTypes.SYSTEM);
}
fireConsoleInitializedEvent(consoleView);
consoleView.initialized();
});
}
else {
myConsoleView.print("Couldn't connect to console process.", ProcessOutputTypes.STDERR);
myProcessHandler.destroyProcess();
myConsoleView.setEditable(false);
}
}
/** Action that restarts this console (see {@link #rerun()}). */
protected AnAction createRerunAction() {
return new RestartAction(this);
}
/** Enables statement execution once the backend connection is established. */
private void enableConsoleExecuteAction() {
myConsoleExecuteActionHandler.setEnabled(true);
}
/**
 * Repeatedly attempts the XML-RPC handshake with the console backend, retrying
 * every 100 ms until it succeeds or HANDSHAKE_TIMEOUT ms have elapsed.
 *
 * @return true if the backend answered the handshake within the timeout
 */
private boolean handshake() {
    final long started = System.currentTimeMillis();
    boolean succeeded = false;
    while (true) {
        try {
            succeeded = myPydevConsoleCommunication.handshake();
        }
        catch (XmlRpcException ignored) {
            // Backend not ready yet — treat as a failed attempt and retry.
            succeeded = false;
        }
        if (succeeded || System.currentTimeMillis() - started > HANDSHAKE_TIMEOUT) {
            return succeeded;
        }
        TimeoutUtil.sleep(100);
    }
}
/**
 * Wraps the platform "Stop" action so the console communication is closed
 * cleanly before the process itself is stopped.
 */
private AnAction createStopAction() {
AnAction generalStopAction = ActionManager.getInstance().getAction(IdeActions.ACTION_STOP_PROGRAM);
final AnAction stopAction = new DumbAwareAction() {
@Override
public void update(AnActionEvent e) {
generalStopAction.update(e);
}
@Override
public void actionPerformed(AnActionEvent e) {
// Close the REPL channel first, then delegate the actual stop.
e = stopConsole(e);
generalStopAction.actionPerformed(e);
}
};
stopAction.copyFrom(generalStopAction);
return stopAction;
}
/**
 * Toggle that switches soft wraps on both console editors and persists the
 * choice in the console settings.
 */
private class SoftWrapAction extends ToggleAction implements DumbAware {
// Initialized from the persisted setting; applied to the editors immediately.
private boolean isSelected = myConsoleSettings.isUseSoftWraps();
SoftWrapAction() {
super(ActionsBundle.actionText("EditorToggleUseSoftWraps"), ActionsBundle.actionDescription("EditorToggleUseSoftWraps"),
AllIcons.Actions.ToggleSoftWrap);
updateEditors();
}
@Override
public boolean isSelected(AnActionEvent e) {
return isSelected;
}
// Applies the current flag to both the output editor and the input (console) editor.
private void updateEditors() {
myConsoleView.getEditor().getSettings().setUseSoftWraps(isSelected);
myConsoleView.getConsoleEditor().getSettings().setUseSoftWraps(isSelected);
}
@Override
public void setSelected(AnActionEvent e, boolean state) {
isSelected = state;
updateEditors();
myConsoleSettings.setUseSoftWraps(isSelected);
}
}
/**
 * Wraps the platform "Close" action so the console communication is shut down
 * and the tool window content removed before the content descriptor is closed.
 */
private AnAction createCloseAction(final RunContentDescriptor descriptor) {
final AnAction generalCloseAction = new CloseAction(getExecutor(), descriptor, myProject);
final AnAction stopAction = new DumbAwareAction() {
@Override
public void update(AnActionEvent e) {
generalCloseAction.update(e);
}
@Override
public void actionPerformed(AnActionEvent e) {
e = stopConsole(e);
clearContent(descriptor);
generalCloseAction.actionPerformed(e);
}
};
stopAction.copyFrom(generalCloseAction);
return stopAction;
}
/** Removes this console's tab from the Python console tool window, if present. */
protected void clearContent(RunContentDescriptor descriptor) {
PythonConsoleToolWindow toolWindow = PythonConsoleToolWindow.getInstance(myProject);
if (toolWindow != null && toolWindow.getToolWindow() != null) {
// Content is looked up by display name, which is the (unique) console title.
Content content = toolWindow.getToolWindow().getContentManager().findContent(descriptor.getDisplayName());
assert content != null;
toolWindow.getToolWindow().getContentManager().removeContent(content, true);
}
}
/**
 * Closes the REPL communication channel and returns a fresh copy of the event
 * to pass on to the delegated platform stop/close action.
 */
private AnActionEvent stopConsole(AnActionEvent e) {
if (myPydevConsoleCommunication != null) {
e = new AnActionEvent(e.getInputEvent(), e.getDataContext(), e.getPlace(),
e.getPresentation(), e.getActionManager(), e.getModifiers());
try {
closeCommunication();
// waiting for REPL communication before destroying process handler
Thread.sleep(300);
}
catch (Exception ignored) {
// Ignore — best effort; InterruptedException and close failures are non-fatal here.
}
}
return e;
}
/**
 * Creates a console-local "split line" editor action that splits the current
 * line and moves the caret down to the new line.
 */
protected AnAction createSplitLineAction() {
class ConsoleSplitLineAction extends EditorAction {
private static final String CONSOLE_SPLIT_LINE_ACTION_ID = "Console.SplitLine";
public ConsoleSplitLineAction() {
super(new EditorWriteActionHandler() {
// Delegates enable/execute to the standard SplitLineAction handler.
private final SplitLineAction mySplitLineAction = new SplitLineAction();
@Override
public boolean isEnabled(Editor editor, DataContext dataContext) {
return mySplitLineAction.getHandler().isEnabled(editor, dataContext);
}
@Override
public void executeWriteAction(Editor editor, @Nullable Caret caret, DataContext dataContext) {
((EditorWriteActionHandler)mySplitLineAction.getHandler()).executeWriteAction(editor, caret, dataContext);
// Unlike the stock action, place the caret on the line below the split.
editor.getCaretModel().getCurrentCaret().moveCaretRelatively(0, 1, false, true);
}
});
}
public void setup() {
EmptyAction.setupAction(this, CONSOLE_SPLIT_LINE_ACTION_ID, null);
}
}
ConsoleSplitLineAction action = new ConsoleSplitLineAction();
action.setup();
return action;
}
/** Closes the REPL channel, but only while the backend process is still alive. */
private void closeCommunication() {
if (!myProcessHandler.isProcessTerminated()) {
myPydevConsoleCommunication.close();
}
}
/**
 * Creates the execute-action handler (initially disabled until the handshake
 * completes) and installs console history support.
 */
@NotNull
protected PythonConsoleExecuteActionHandler createExecuteActionHandler() {
myConsoleExecuteActionHandler =
new PydevConsoleExecuteActionHandler(myConsoleView, myProcessHandler, myPydevConsoleCommunication);
myConsoleExecuteActionHandler.setEnabled(false);
new ConsoleHistoryController(myConsoleType.getTypeId(), "", myConsoleView).install();
return myConsoleExecuteActionHandler;
}
/** @return the XML-RPC communication channel to the console backend (null before startup). */
@Override
public PydevConsoleCommunication getPydevConsoleCommunication() {
return myPydevConsoleCommunication;
}
/**
 * Returns the virtual file backing the console's PSI file, unwrapping
 * light (in-memory) files to their originals.
 */
static VirtualFile getConsoleFile(PsiFile psiFile) {
    final VirtualFile virtualFile = psiFile.getViewProvider().getVirtualFile();
    return virtualFile instanceof LightVirtualFile
           ? ((LightVirtualFile)virtualFile).getOriginalFile()
           : virtualFile;
}
/** Registers a one-shot listener fired (and then discarded) when the console finishes initializing. */
@Override
public void addConsoleListener(ConsoleListener consoleListener) {
myConsoleListeners.add(consoleListener);
}
/** Notifies all registered listeners of initialization, then clears them (one-shot semantics). */
private void fireConsoleInitializedEvent(LanguageConsoleView consoleView) {
for (ConsoleListener listener : myConsoleListeners) {
listener.handleConsoleInitialized(consoleView);
}
myConsoleListeners.clear();
}
/** @return the handler that executes statements typed into the console (null before startup). */
@Override
public PythonConsoleExecuteActionHandler getConsoleExecuteActionHandler() {
return myConsoleExecuteActionHandler;
}
/** Toolbar action that restarts the owning console runner (Rerun look & feel, Restart icon). */
private static class RestartAction extends AnAction {
private final PydevConsoleRunnerImpl myConsoleRunner;
private RestartAction(PydevConsoleRunnerImpl runner) {
copyFrom(ActionManager.getInstance().getAction(IdeActions.ACTION_RERUN));
getTemplatePresentation().setIcon(AllIcons.Actions.Restart);
myConsoleRunner = runner;
}
@Override
public void actionPerformed(AnActionEvent e) {
myConsoleRunner.rerun();
}
}
/**
 * Restarts the console: closes the communication channel, waits (up to 5s) for the
 * process to die — killing it if needed — then invokes the configured rerun action.
 */
private void rerun() {
new Task.Backgroundable(myProject, "Restarting Console", true) {
@Override
public void run(@NotNull ProgressIndicator indicator) {
if (myProcessHandler != null) {
UIUtil.invokeAndWaitIfNeeded((Runnable)() -> closeCommunication());
boolean processStopped = myProcessHandler.waitFor(5000L);
if (!processStopped && myProcessHandler.canKillProcess()) {
myProcessHandler.killProcess();
}
myProcessHandler.waitFor();
}
// The rerun consumer receives the old title so the new console can reuse it.
GuiUtils.invokeLaterIfNeeded(() -> myRerunAction.consume(myConsoleTitle), ModalityState.defaultModalityState());
}
}.queue();
}
/**
 * Toggle that attaches a debugger to the running console. Once a debug session
 * exists the action is disabled — detaching is not implemented (see TODO below).
 */
private class ConnectDebuggerAction extends ToggleAction implements DumbAware {
private boolean mySelected = false;
private XDebugSession mySession = null;
public ConnectDebuggerAction() {
super("Attach Debugger", "Enables tracing of code executed in console", AllIcons.Actions.StartDebugger);
}
@Override
public boolean isSelected(AnActionEvent e) {
return mySelected;
}
@Override
public void update(@NotNull AnActionEvent e) {
// A session already exists: the toggle cannot be undone, so disable it.
if (mySession != null) {
e.getPresentation().setEnabled(false);
}
else {
super.update(e);
e.getPresentation().setEnabled(true);
}
}
@Override
public void setSelected(AnActionEvent e, boolean state) {
mySelected = state;
if (mySelected) {
try {
mySession = connectToDebugger();
}
catch (Exception e1) {
LOG.error(e1);
Messages.showErrorDialog("Can't connect to debugger", "Error Connecting Debugger");
}
}
else {
//TODO: disable debugging
}
}
}
/** Toolbar action that opens an additional python console for the current project. */
private static class NewConsoleAction extends AnAction implements DumbAware {
public NewConsoleAction() {
super("New Console", "Creates new python console", AllIcons.General.Add);
}
@Override
public void update(AnActionEvent e) {
e.getPresentation().setEnabled(true);
}
@Override
public void actionPerformed(AnActionEvent e) {
final Project project = e.getData(CommonDataKeys.PROJECT);
if (project != null) {
PydevConsoleRunner runner =
PythonConsoleRunnerFactory.getInstance().createConsoleRunner(project, e.getData(LangDataKeys.MODULE));
runner.run(true);
}
}
}
/**
 * Starts a debugger session attached to this console: opens a server socket,
 * creates a debug console view/process pair, bridges the console communication
 * to the debug communication, and waits for the backend to connect.
 */
private XDebugSession connectToDebugger() throws ExecutionException {
final ServerSocket serverSocket = PythonCommandLineState.createServerSocket();
return XDebuggerManager.getInstance(myProject).
startSessionAndShowTab("Python Console Debugger", PythonIcons.Python.Python, null, true, new XDebugProcessStarter() {
@NotNull
public XDebugProcess start(@NotNull final XDebugSession session) {
PythonDebugLanguageConsoleView debugConsoleView = new PythonDebugLanguageConsoleView(myProject, mySdk);
PyConsoleDebugProcessHandler consoleDebugProcessHandler =
new PyConsoleDebugProcessHandler(myProcessHandler);
PyConsoleDebugProcess consoleDebugProcess =
new PyConsoleDebugProcess(session, serverSocket, debugConsoleView,
consoleDebugProcessHandler);
PythonDebugConsoleCommunication communication =
PyDebugRunner.initDebugConsoleView(myProject, consoleDebugProcess, debugConsoleView, consoleDebugProcessHandler, session);
// Refresh the variable views after each executed command.
communication.addCommunicationListener(new ConsoleCommunicationListener() {
@Override
public void commandExecuted(boolean more) {
session.rebuildViews();
}
@Override
public void inputRequested() {
}
});
myPydevConsoleCommunication.setDebugCommunication(communication);
debugConsoleView.attachToProcess(consoleDebugProcessHandler);
consoleDebugProcess.waitForNextConnection();
try {
consoleDebugProcess.connect(myPydevConsoleCommunication);
}
catch (Exception e) {
LOG.error(e); //TODO
}
myProcessHandler.notifyTextAvailable("\nDebugger connected.\n", ProcessOutputTypes.STDERR);
return consoleDebugProcess;
}
});
}
/** @return the handler of the console backend process (null before startup). */
@Override
public PyConsoleProcessHandler getProcessHandler() {
return myProcessHandler;
}
/** @return the console UI component (null before startup). */
@Override
public PythonConsoleView getConsoleView() {
return myConsoleView;
}
/** Default factory for creating console runners of this implementation. */
public static PythonConsoleRunnerFactory factory() {
return new PydevConsoleRunnerFactory();
}
/**
 * Read-only run parameters for launching a python console, backed by the console
 * settings. All mutating setters throw UnsupportedOperationException by design.
 *
 * NOTE(review): the constructor mutates the caller-supplied envs map in place
 * (adds console-settings envs and the debugger values policy flag) — callers
 * appear to rely on this; confirm before changing to a defensive copy.
 */
public static class PythonConsoleRunParams implements PythonRunParams {
private final PyConsoleOptions.PyConsoleSettings myConsoleSettings;
private final String myWorkingDir;
private final Sdk mySdk;
private final Map<String, String> myEnvironmentVariables;
public PythonConsoleRunParams(@NotNull PyConsoleOptions.PyConsoleSettings consoleSettings,
                              @Nullable String workingDir,
                              @NotNull Sdk sdk,
                              @NotNull Map<String, String> envs) {
myConsoleSettings = consoleSettings;
myWorkingDir = workingDir;
mySdk = sdk;
myEnvironmentVariables = envs;
myEnvironmentVariables.putAll(consoleSettings.getEnvs());
// Propagate the debugger's variable-loading policy to the backend via an env var.
PyDebuggerSettings debuggerSettings = PyDebuggerSettings.getInstance();
if (debuggerSettings.getValuesPolicy() != PyDebugValue.ValuesPolicy.SYNC) {
myEnvironmentVariables.put(PyDebugValue.POLICY_ENV_VARS.get(debuggerSettings.getValuesPolicy()), "True");
}
}
@Override
public String getInterpreterOptions() {
return myConsoleSettings.getInterpreterOptions();
}
@Override
public void setInterpreterOptions(String interpreterOptions) {
throw new UnsupportedOperationException();
}
@Override
public String getWorkingDirectory() {
return myWorkingDir;
}
@Override
public void setWorkingDirectory(String workingDirectory) {
throw new UnsupportedOperationException();
}
@Nullable
@Override
public String getSdkHome() {
return mySdk.getHomePath();
}
@Override
public void setSdkHome(String sdkHome) {
throw new UnsupportedOperationException();
}
@Override
public void setModule(Module module) {
throw new UnsupportedOperationException();
}
@Override
public String getModuleName() {
return myConsoleSettings.getModuleName();
}
@Override
public boolean isUseModuleSdk() {
return myConsoleSettings.isUseModuleSdk();
}
@Override
public void setUseModuleSdk(boolean useModuleSdk) {
throw new UnsupportedOperationException();
}
@Override
public boolean isPassParentEnvs() {
return myConsoleSettings.isPassParentEnvs();
}
@Override
public void setPassParentEnvs(boolean passParentEnvs) {
throw new UnsupportedOperationException();
}
@Override
public Map<String, String> getEnvs() {
return myEnvironmentVariables;
}
@Override
public void setEnvs(Map<String, String> envs) {
throw new UnsupportedOperationException();
}
// NOTE: unlike the other getters, this one also throws — mapping settings are unsupported here.
@Nullable
@Override
public PathMappingSettings getMappingSettings() {
throw new UnsupportedOperationException();
}
@Override
public void setMappingSettings(@Nullable PathMappingSettings mappingSettings) {
throw new UnsupportedOperationException();
}
@Override
public boolean shouldAddContentRoots() {
return myConsoleSettings.shouldAddContentRoots();
}
@Override
public boolean shouldAddSourceRoots() {
return myConsoleSettings.shouldAddSourceRoots();
}
@Override
public void setAddContentRoots(boolean flag) {
throw new UnsupportedOperationException();
}
@Override
public void setAddSourceRoots(boolean flag) {
throw new UnsupportedOperationException();
}
}
}
| apache-2.0 |
chenjun0210/HanLP | src/main/java/com/hankcs/hanlp/mining/word2vec/TextFileCorpus.java | 6296 | package com.hankcs.hanlp.mining.word2vec;
import java.io.*;
import java.util.TreeMap;
/**
 * Corpus implementation that reads a plain text training file (whitespace-separated
 * tokens, one sentence per line), builds the vocabulary, and writes the corpus as a
 * stream of int word ids into a temporary cache file.
 * Control markers written to the cache: -3 = end of sentence, -4 = word removed by pruning.
 */
public class TextFileCorpus extends Corpus
{
    /** Hard upper bound on vocabulary entries; pruning kicks in above 70% of this. */
    private static final int VOCAB_MAX_SIZE = 30000000;
    /** Frequency threshold used by reduceVocab(); incremented after each pruning pass. */
    private int minReduce = 1;
    private BufferedReader raf = null;
    /** Sink for the int-encoded corpus cache. */
    private DataOutputStream cache;

    public TextFileCorpus(Config config) throws IOException
    {
        super(config);
    }

    @Override
    public void shutdown() throws IOException
    {
        Utils.closeQuietly(raf);
        wordsBuffer = null;
    }

    @Override
    public void rewind(int numThreads, int id) throws IOException
    {
        super.rewind(numThreads, id);
    }

    @Override
    public String nextWord() throws IOException
    {
        return readWord(raf);
    }

    /**
     * Reduces the vocabulary by removing infrequent tokens, then rewrites the cache
     * file so its word ids refer to the compacted vocabulary.
     */
    void reduceVocab()
    {
        // table maps an old vocabulary index to its new index, or -4 when the word was dropped.
        table = new int[vocabSize];
        int j = 0;
        for (int i = 0; i < vocabSize; i++)
        {
            if (vocab[i].cn > minReduce)
            {
                vocab[j].cn = vocab[i].cn;
                vocab[j].word = vocab[i].word;
                table[vocabIndexMap.get(vocab[j].word)] = j;
                j++;
            }
            else
            {
                // BUGFIX: the dropped word is vocab[i]; the original indexed with
                // vocab[j].word, marking the wrong slot and leaving the dropped
                // word's old id silently remapped to 0.
                table[vocabIndexMap.get(vocab[i].word)] = -4;
            }
        }
        // adjust the index in the cache
        try
        {
            cache.close();
            File fixingFile = new File(cacheFile.getAbsolutePath() + ".fixing");
            cache = new DataOutputStream(new FileOutputStream(fixingFile));
            DataInputStream oldCache = new DataInputStream(new FileInputStream(cacheFile));
            while (oldCache.available() >= 4)
            {
                int oldId = oldCache.readInt();
                if (oldId < 0)
                {
                    // control markers (e.g. -3 end of sentence) pass through unchanged
                    cache.writeInt(oldId);
                    continue;
                }
                int id = table[oldId];
                if (id == -4) continue; // word was pruned — drop it from the cache
                cache.writeInt(id);
            }
            oldCache.close();
            cache.close();
            if (!fixingFile.renameTo(cacheFile))
            {
                throw new RuntimeException(String.format("moving %s to %s failed", fixingFile.getAbsolutePath(), cacheFile.getName()));
            }
            cache = new DataOutputStream(new FileOutputStream(cacheFile));
        }
        catch (IOException e)
        {
            // BUGFIX: the original used String.format("failed to adjust cache file", e),
            // which has no placeholder and silently discarded the cause.
            throw new RuntimeException("failed to adjust cache file", e);
        }
        table = null;
        vocabSize = j;
        // rebuild word -> index map for the compacted vocabulary
        vocabIndexMap.clear();
        for (int i = 0; i < vocabSize; i++)
        {
            vocabIndexMap.put(vocab[i].word, i);
        }
        minReduce++;
    }

    /**
     * Scans the whole training file, building the vocabulary with frequencies and
     * writing the int-encoded corpus to a temp cache file. Progress is reported to
     * the configured callback (or stderr when none is set).
     */
    public void learnVocab() throws IOException
    {
        vocab = new VocabWord[vocabMaxSize];
        vocabIndexMap = new TreeMap<String, Integer>();
        vocabSize = 0;
        final File trainFile = new File(config.getInputFile());
        BufferedReader raf = null;
        FileInputStream fileInputStream = null;
        cache = null;
        vocabSize = 0;
        TrainingCallback callback = config.getCallback();
        try
        {
            fileInputStream = new FileInputStream(trainFile);
            raf = new BufferedReader(new InputStreamReader(fileInputStream, encoding));
            cacheFile = File.createTempFile(String.format("corpus_%d", System.currentTimeMillis()), ".bin");
            cache = new DataOutputStream(new FileOutputStream(cacheFile));
            while (true)
            {
                String word = readWord(raf);
                if (word == null && eoc) break;
                trainWords++;
                // report progress every 100k tokens
                if (trainWords % 100000 == 0)
                {
                    if (callback == null)
                    {
                        System.err.printf("%c%.2f%% %dK", 13,
                                          (1.f - fileInputStream.available() / (float) trainFile.length()) * 100.f,
                                          trainWords / 1000);
                        System.err.flush();
                    }
                    else
                    {
                        callback.corpusLoading((1.f - fileInputStream.available() / (float) trainFile.length()) * 100.f);
                    }
                }
                int idx = searchVocab(word);
                if (idx == -1)
                {
                    idx = addWordToVocab(word);
                    vocab[idx].cn = 1;
                }
                else vocab[idx].cn++;
                // prune before the vocabulary overflows its hard cap
                if (vocabSize > VOCAB_MAX_SIZE * 0.7)
                {
                    reduceVocab();
                    idx = searchVocab(word);
                }
                cache.writeInt(idx);
            }
        }
        finally
        {
            Utils.closeQuietly(fileInputStream);
            Utils.closeQuietly(raf);
            Utils.closeQuietly(cache);
            System.err.println();
        }
        if (callback == null)
        {
            System.err.printf("%c100%% %dK", 13, trainWords / 1000);
            System.err.flush();
        }
        else
        {
            callback.corpusLoading(100);
            callback.corpusLoaded(vocabSize, trainWords, trainWords);
        }
    }

    // Buffer of tokens from the current line and the read position within it.
    String[] wordsBuffer = new String[0];
    int wbp = wordsBuffer.length;

    /**
     * Reads a single word from a file, assuming space + tab + EOL to be word boundaries
     *
     * @param raf
     * @return null if EOF
     * @throws IOException
     */
    String readWord(BufferedReader raf) throws IOException
    {
        while (true)
        {
            // check the buffer first
            if (wbp < wordsBuffer.length)
            {
                return wordsBuffer[wbp++];
            }
            String line = raf.readLine();
            if (line == null)
            { // end of corpus
                eoc = true;
                return null;
            }
            line = line.trim();
            if (line.length() == 0)
            {
                continue;
            }
            cache.writeInt(-3); // mark end of sentence
            wordsBuffer = line.split("\\s+");
            wbp = 0;
            eoc = false;
        }
    }
}
| apache-2.0 |
is-apps/WebproxyPortlet | src/main/java/org/jasig/portlet/proxy/search/GsaSearchStrategy.java | 6764 | /**
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jasig.portlet.proxy.search;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import javax.portlet.EventRequest;
import javax.portlet.PortletMode;
import javax.portlet.WindowState;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import org.apache.http.HttpResponse;
import org.apache.http.impl.client.DecompressingHttpClient;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.jasig.portal.search.PortletUrl;
import org.jasig.portal.search.PortletUrlParameter;
import org.jasig.portal.search.PortletUrlType;
import org.jasig.portal.search.SearchRequest;
import org.jasig.portal.search.SearchResult;
import org.jasig.portlet.proxy.search.util.SearchUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Required;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
/**
 * {@link ISearchStrategy} that queries a Google Search Appliance (GSA) over
 * HTTP and converts its XML response ({@code output=xml_no_dtd}) into portal
 * {@link SearchResult} objects.
 */
public class GsaSearchStrategy implements ISearchStrategy {

    protected final Logger log = LoggerFactory.getLogger(this.getClass());

    private ISearchService contentSearchProvider;

    @Required
    @Resource(name="contentSearchProvider")
    public void setContentSearchProvider(ISearchService contentSearchProvider) {
        this.contentSearchProvider = contentSearchProvider;
    }

    // Register this strategy with the search service once Spring wiring completes.
    @PostConstruct
    public void init() {
        contentSearchProvider.addSearchStrategy(this);
    }

    @Override
    public String getStrategyName() {
        return "GSA";
    }

    /**
     * Executes the GSA query built from portlet preferences and maps each
     * {@code /GSP/RES/R} result element (URL, title, summary) to a
     * {@link SearchResult} carrying a render URL for the proxy portlet.
     * All failures are logged and an empty/partial list is returned.
     */
    @Override
    public List<SearchResult> search(SearchRequest searchQuery,
            EventRequest request, org.jsoup.nodes.Document ignore) {

        List<SearchResult> searchResults = new ArrayList<SearchResult>();
        String searchBaseURL = this.buildGsaUrl(searchQuery, request);
        // DecompressingHttpClient transparently handles gzip/deflate responses.
        // NOTE(review): DefaultHttpClient is deprecated in later HttpClient
        // releases — consider HttpClientBuilder when upgrading.
        HttpClient client = new DecompressingHttpClient(new DefaultHttpClient());
        HttpGet get = new HttpGet(searchBaseURL);
        try {
            HttpResponse httpResponse = client.execute(get);
            log.debug("STATUS CODE :: "+httpResponse.getStatusLine().getStatusCode());
            InputStream in = httpResponse.getEntity().getContent();
            DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
            DocumentBuilder builder = docFactory.newDocumentBuilder();
            Document doc = builder.parse(in);
            log.debug(("GOT InputSource"));
            XPathFactory factory=XPathFactory.newInstance();
            XPath xPath=factory.newXPath();
            // Total number of result elements in the GSA response.
            Integer maxCount = Integer.parseInt(xPath.evaluate("count(/GSP/RES/R)", doc));
            final String[] whitelistRegexes = request.getPreferences().getValues("gsaWhitelistRegex", new String[] {});
            log.debug(maxCount + " -- Results");
            // XPath positions are 1-based, hence count starts at 1.
            for (int count = 1; count <= maxCount; count++ ) {
                String u = xPath.evaluate("/GSP/RES/R["+count+"]/U/text()", doc);  // result URL
                String t = xPath.evaluate("/GSP/RES/R["+count+"]/T/text()", doc);  // title
                String s = xPath.evaluate("/GSP/RES/R["+count+"]/S/text()", doc);  // summary/snippet
                log.debug("title: [" + t + "]");
                SearchResult result = new SearchResult();
                result.setTitle(t);
                result.setSummary(s);
                // Build a render URL that opens the hit through the proxy portlet.
                PortletUrl pUrl = new PortletUrl();
                pUrl.setPortletMode(PortletMode.VIEW.toString());
                pUrl.setType(PortletUrlType.RENDER);
                pUrl.setWindowState(WindowState.MAXIMIZED.toString());
                PortletUrlParameter param = new PortletUrlParameter();
                param.setName("proxy.url");
                param.getValue().add(u);
                pUrl.getParam().add(param);
                result.setPortletUrl(pUrl);
                // presumably registers u with the session URL whitelist — verify
                // against SearchUtil.updateUrls; the return value is unused here.
                new SearchUtil().updateUrls(u, request, whitelistRegexes);
                searchResults.add(result);
            }
        } catch (IOException ex) {
            log.error(ex.getMessage(),ex);
        } catch (XPathExpressionException ex) {
            log.error(ex.getMessage(),ex);
        } catch (ParserConfigurationException ex) {
            log.error(ex.getMessage(),ex);
        } catch (SAXException ex) {
            log.error(ex.getMessage(),ex);
        }
        return searchResults;
    }

    /**
     * Builds the GSA search URL from the query terms and the portlet
     * preferences {@code gsaHost}, {@code gsaCollection} and {@code gsaFrontend}.
     * Missing preferences are only logged; a (non-functional) URL is still returned.
     */
    private String buildGsaUrl(SearchRequest searchQuery, EventRequest request) {
        String searchTerms = "";
        try {
            searchTerms = URLEncoder.encode(searchQuery.getSearchTerms(), "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is guaranteed by the JVM spec, so this should never happen.
            log.warn("Search term cannot be converted to UTF-8",e);
        }
        String gsa = request.getPreferences().getValue("gsaHost", "");
        String collection = request.getPreferences().getValue("gsaCollection", "");
        String frontend = request.getPreferences().getValue("gsaFrontend", "");
        if (gsa.equals("") || collection.equals("") || frontend.equals("")) {
            log.info("NOT Configured for search -- GSA:"+gsa+" -- COLLECTION:"+collection+" -- frontend:"+frontend);
        }
        String searchBaseURL = "http://"+gsa+"/search?q="+searchTerms+"&site="+ collection +"&client="+frontend+"&output=xml_no_dtd";
        log.debug(searchBaseURL);
        return searchBaseURL;
    }
}
| apache-2.0 |
tlehoux/camel | camel-core/src/test/java/org/apache/camel/processor/interceptor/TracerTest.java | 3766 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.processor.interceptor;
import org.apache.camel.CamelContext;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.impl.JndiRegistry;
import org.junit.Ignore;
/**
* @version
*/
/**
 * Tests the Camel {@link Tracer} interceptor: only exchanges matching the
 * trace filter (body contains "Camel") are forwarded to the trace destination
 * endpoint, and the emitted {@link DefaultTraceEventMessage} carries the
 * expected metadata.
 */
public class TracerTest extends ContextTestSupport {

    private Tracer tracer;

    @Override
    protected JndiRegistry createRegistry() throws Exception {
        JndiRegistry jndi = super.createRegistry();
        // The tracer looks up its formatter from the registry by this name.
        jndi.bind("traceFormatter", new DefaultTraceFormatter());
        return jndi;
    }

    @Override
    protected CamelContext createCamelContext() throws Exception {
        CamelContext context = super.createCamelContext();
        // Configure the tracer before routes start: only bodies containing
        // "Camel" are traced, and trace events go to mock:traced.
        tracer = Tracer.createTracer(context);
        tracer.setEnabled(true);
        tracer.setTraceInterceptors(true);
        tracer.setTraceFilter(body().contains("Camel"));
        tracer.setTraceExceptions(true);
        tracer.setLogStackTrace(true);
        tracer.setUseJpa(false);
        tracer.setDestination(context.getEndpoint("mock:traced"));
        context.addInterceptStrategy(tracer);
        tracer.start();
        return context;
    }

    @Override
    protected void tearDown() throws Exception {
        tracer.stop();
        super.tearDown();
    }

    public void testTracer() throws Exception {
        // NOTE: this local shadows the Tracer field of the same name.
        MockEndpoint tracer = getMockEndpoint("mock:traced");
        // Only "Hello Camel" matches the trace filter, hence a single trace event.
        tracer.expectedMessageCount(1);

        MockEndpoint result = getMockEndpoint("mock:result");
        result.expectedMessageCount(3);

        template.sendBody("direct:start", "Hello World");
        template.sendBody("direct:start", "Bye World");
        template.sendBody("direct:start", "Hello Camel");

        assertMockEndpointsSatisfied();

        // Inspect the single captured trace event.
        DefaultTraceEventMessage em = tracer.getReceivedExchanges().get(0).getIn().getBody(DefaultTraceEventMessage.class);
        assertEquals("Hello Camel", em.getBody());
        assertEquals("String", em.getBodyType());
        assertEquals(null, em.getCausedByException());
        assertNotNull(em.getExchangeId());
        assertNotNull(em.getShortExchangeId());
        assertNotNull(em.getExchangePattern());
        assertEquals("direct://start", em.getFromEndpointUri());
        // there is always a breadcrumb header
        assertNotNull(em.getHeaders());
        assertNotNull(em.getProperties());
        assertNull(em.getOutBody());
        assertNull(em.getOutBodyType());
        assertNull(em.getOutHeaders());
        assertNull(em.getPreviousNode());
        assertNotNull(em.getToNode());
        assertNotNull(em.getTimestamp());
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                from("direct:start").to("mock:result");
            }
        };
    }
}
| apache-2.0 |
Glasgow2015/team-1 | app/src/main/java/jp/com/beetracker/MainActivity.java | 5239 | package jp.com.beetracker;
import android.content.Entity;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import com.google.gson.Gson;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.util.Date;
/**
 * Entry activity for the BeeTracker app. On creation it serializes a sample
 * {@link Hive} and posts hive data as JSON to the backend; payloads that fail
 * to upload are queued in the local database for later retry.
 */
public class MainActivity extends AppCompatActivity {

    // Local persistence used to queue JSON payloads that could not be uploaded.
    public static DBHelper mydatabase;
    // Next row id used when queueing failed uploads in POST().
    public static int id;

    /**
     * Sends {@code json} to {@code url} as an HTTP POST with JSON
     * Accept/Content-type headers.
     *
     * @param url  target endpoint
     * @param json request body
     * @return the response body on success, "Did not work!" when the response
     *         carried no content, or "" when an exception occurred; in both
     *         failure cases the payload is queued via {@code mydatabase}.
     */
    public static String POST(String url, String json) {
        InputStream inputStream = null;
        String result = "";
        try {
            HttpClient httpclient = new DefaultHttpClient();
            HttpPost httpPost = new HttpPost(url);
            StringEntity se = new StringEntity(json);
            httpPost.setEntity(se);
            httpPost.setHeader("Accept", "application/json");
            httpPost.setHeader("Content-type", "application/json");
            HttpResponse httpResponse = httpclient.execute(httpPost);
            inputStream = httpResponse.getEntity().getContent();
            if (inputStream != null) {
                result = convertInputStreamToString(inputStream);
            } else {
                // No response body: treat as failure and queue for retry.
                result = "Did not work!";
                mydatabase.insertJson(id++, json);
            }
        } catch (Exception e) {
            Log.d("InputStream", e.getLocalizedMessage());
            mydatabase.insertJson(id++, json);
        }
        return result;
    }

    /**
     * Drains {@code inputStream} line by line into a single string (line
     * separators are dropped) and closes the stream.
     */
    private static String convertInputStreamToString(InputStream inputStream) throws IOException {
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
        StringBuilder result = new StringBuilder();
        String line;
        while ((line = bufferedReader.readLine()) != null) {
            result.append(line);
        }
        inputStream.close();
        return result.toString();
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        // Build a sample hive record: timestamp, lat, lng, hive number, apiary name.
        Hive hive = new Hive(new Date().getTime(), 40.44, 54.00, 0, "Apiary2");
        hive.setTable("hives");
        Gson gson = new Gson();
        // NOTE(review): this serialization is computed but never sent — the
        // POST below uses a hard-coded payload instead; confirm intent.
        String json = gson.toJson(hive);

        String url = "http://54.74.114.193:8000/ap/hives/";
        MainActivity.POST(url, "{\"number\":\"1\",\"apname\":\"test\",\"log\":\"54.43\",\"lat\":\"23.4\",\"date\":\"45435435453\"}");
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.menu_main, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Handle action bar item clicks here. The action bar will
        // automatically handle clicks on the Home/Up button, so long
        // as you specify a parent activity in AndroidManifest.xml.
        int id = item.getItemId();

        //noinspection SimplifiableIfStatement
        if (id == R.id.action_settings) {
            return true;
        }

        return super.onOptionsItemSelected(item);
    }
}
| apache-2.0 |
shiver-me-timbers/smt-waiting-parent | smt-waiting-test/smt-waiting-integration-test/src/test/java/shiver/me/timbers/waiting/ITManualWaiter.java | 3013 | package shiver.me.timbers.waiting;
import shiver.me.timbers.waiting.execution.ManualWaitingExclude;
import shiver.me.timbers.waiting.execution.ManualWaitingExcludeWithInclude;
import shiver.me.timbers.waiting.execution.ManualWaitingFor;
import shiver.me.timbers.waiting.execution.ManualWaitingForNotNull;
import shiver.me.timbers.waiting.execution.ManualWaitingForTrue;
import shiver.me.timbers.waiting.execution.ManualWaitingInclude;
import shiver.me.timbers.waiting.execution.ManualWaitingIncludeWithExclude;
import shiver.me.timbers.waiting.execution.ManualWaitingInterval;
import shiver.me.timbers.waiting.execution.ManualWaitingTimeout;
import shiver.me.timbers.waiting.execution.WaitingExclude;
import shiver.me.timbers.waiting.execution.WaitingFor;
import shiver.me.timbers.waiting.execution.WaitingForNotNull;
import shiver.me.timbers.waiting.execution.WaitingForTrue;
import shiver.me.timbers.waiting.execution.WaitingInclude;
import shiver.me.timbers.waiting.execution.WaitingInterval;
import shiver.me.timbers.waiting.execution.WaitingTimeout;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
 * Integration tests for the manually-configured waiter: each factory method
 * delegates to the corresponding Manual* execution wrapper, supplying the
 * waiting options (timeout, interval, includes/excludes, result validation)
 * programmatically rather than via annotations or properties.
 */
public class ITManualWaiter extends AbstractITWaiter {

    @Override
    public WaitingInterval interval(final long duration, final TimeUnit unit) {
        return new ManualWaitingInterval(duration, unit);
    }

    @Override
    public WaitingTimeout timeout(final long duration, final TimeUnit unit) {
        return new ManualWaitingTimeout(duration, unit);
    }

    @Override
    public WaitingFor waitFor(final long duration, final TimeUnit unit, final ResultValidator validator) {
        return new ManualWaitingFor(duration, unit, validator);
    }

    @Override
    public WaitingForNotNull waitForNotNull(final long duration, final TimeUnit unit, final boolean isNotNull) {
        return new ManualWaitingForNotNull(duration, unit, isNotNull);
    }

    @Override
    public WaitingForTrue waitForTrue(final long duration, final TimeUnit unit, final boolean isTrue) {
        return new ManualWaitingForTrue(duration, unit, isTrue);
    }

    @Override
    public WaitingInclude includes(final long duration, final TimeUnit unit, final Throwable... includes) {
        return new ManualWaitingInclude(duration, unit, includes);
    }

    @Override
    public WaitingInclude includesWithExcludes(
        final long duration,
        final TimeUnit unit,
        final List<Throwable> includes,
        final List<Throwable> excludes
    ) {
        return new ManualWaitingIncludeWithExclude(duration, unit, includes, excludes);
    }

    @Override
    public WaitingExclude excludes(final long duration, final TimeUnit unit, final Throwable... excludes) {
        return new ManualWaitingExclude(duration, unit, excludes);
    }

    @Override
    public WaitingExclude excludesWithIncludes(long duration, TimeUnit unit, List<Throwable> excludes, List<Throwable> includes) {
        return new ManualWaitingExcludeWithInclude(duration, unit, excludes, includes);
    }
}
| apache-2.0 |
moral369/DNM_SNS | Favorite/src/com/tarks/favorite/webview.java | 5776 | package com.tarks.favorite;
import android.app.AlertDialog;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.view.KeyEvent;
import android.view.View;
import android.view.inputmethod.InputMethodManager;
import android.webkit.CookieSyncManager;
import android.webkit.ValueCallback;
import android.webkit.WebChromeClient;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.EditText;
import android.widget.ProgressBar;
import com.actionbarsherlock.app.SherlockActivity;
import com.actionbarsherlock.app.ActionBar;
import com.actionbarsherlock.view.Menu;
import com.actionbarsherlock.view.MenuItem;
import com.actionbarsherlock.view.MenuInflater;
import com.tarks.favorite.global.Global;
/**
 * In-app browser activity: hosts a {@link WebView} with a progress bar,
 * restricts in-app navigation to tarks.net (other links open externally),
 * supports HTML form file uploads, and falls back to a status page on the
 * first network error.
 */
public final class webview extends SherlockActivity {

    private WebView browser;
    private ProgressBar progressBar;

    // First network error redirects to the service-status page; subsequent
    // errors blank the view and show an alert.
    private boolean firsttime_error = true;

    // File-upload plumbing for <input type="file"> inside the WebView.
    private static final int FILECHOOSER_RESULTCODE = 1;
    private ValueCallback<Uri> uploadMessage = null;

    // Delivers the file picked by the external chooser back to the WebView.
    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        super.onActivityResult(requestCode, resultCode, data);
        if (requestCode == FILECHOOSER_RESULTCODE && uploadMessage != null) {
            Uri result = data == null || resultCode != RESULT_OK ? null : data
                    .getData();
            uploadMessage.onReceiveValue(result);
            uploadMessage = null;
        }
    }

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.webview);
        AvLog.i("");
        //actionbar back
        // getSupportActionBar().setDisplayHomeAsUpEnabled(true);

        Intent intent = getIntent(); // grab the launching intent
        String url = intent.getStringExtra("url"); // and the URL to load from it
        browser = (WebView) findViewById(R.id.webView1);
        // enable JavaScript
        browser.getSettings().setJavaScriptEnabled(true);
        // overlay-style vertical scrollbar
        browser.setVerticalScrollbarOverlay(true);
        browser.loadUrl(url);

        browser.setWebViewClient(new WebViewClient() {
            // Keep tarks.net links inside this WebView; hand anything else
            // that is not an .htm(l) page to an external activity.
            @Override
            public boolean shouldOverrideUrlLoading(WebView view, String url) {
                if (url.startsWith("about:blank")) {
                    return false;
                }
                if (url.startsWith("http://tarks.net")) {
                    browser.loadUrl(url);
                    return false;
                } else {
                    // external host: load .htm(l) pages here, open the rest externally
                    if (url.matches("(?i).*htm.*")) {
                        browser.loadUrl(url);
                        return true;
                    } else {
                        Uri uri = Uri.parse(url);
                        Intent it = new Intent(Intent.ACTION_VIEW, uri);
                        startActivity(it);
                    }
                }
                return true;
            }

            // error handling
            private void loadUrl(String url) {
                // TODO Auto-generated method stub
            }

            @Override
            public void onReceivedError(WebView view, int errorCode,
                    String description, String failingUrl) {
                if(firsttime_error){
                    // first failure: show the hosted service-status page instead
                    firsttime_error = false;
                    browser.loadUrl("https://sites.google.com/site/tarksservicesstatus/");
                }else{
                    browser.loadUrl("about:blank");
                    Global.Infoalert(webview.this, getString(R.string.networkerror), getString(R.string.networkerrord), getString(R.string.yes));
                }
                // AlertDialog.Builder builder = new
                // AlertDialog.Builder(webview.this);
                // builder.setMessage(getString(R.string.networkerrord)).setPositiveButton(getString(R.string.yes),
                // null).setTitle(getString(R.string.networkerror));
                // builder.show();
            }
        });

        progressBar = (ProgressBar) findViewById(R.id.progressBar1);
        browser.setWebChromeClient(new WebChromeClient() {
            // Mirror page-load progress into the progress bar; hide it at 100%.
            @Override
            public void onProgressChanged(WebView view, int progress) {
                progressBar.setProgress(0);
                progressBar.setVisibility(View.VISIBLE);
                webview.this.setProgress(progress * 1000);
                progressBar.incrementProgressBy(progress);
                if (progress == 100) {
                    progressBar.setVisibility(View.GONE);
                }
            }

            // These overloads must also be present for the file chooser to
            // work on ICS and other Android versions.
            public void openFileChooser(ValueCallback<Uri> uploadFile,
                    String acceptType) {
                openFileChooser(uploadFile);
            }

            public void openFileChooser(ValueCallback<Uri> uploadMsg,
                    String acceptType, String capture) {
                openFileChooser(uploadMsg);
            }

            public void openFileChooser(ValueCallback<Uri> uploadMsg) {
                uploadMessage = uploadMsg;
                Intent i = new Intent(Intent.ACTION_GET_CONTENT);
                i.addCategory(Intent.CATEGORY_OPENABLE);
                i.setType("*/*");
                startActivityForResult(Intent.createChooser(i, "File Chooser"),
                        FILECHOOSER_RESULTCODE);
            }

            // Mirror the page title into the activity title.
            @Override
            public void onReceivedTitle(WebView view, String title) {
                webview.this.setTitle(webview.this.browser.getTitle());
            }
        });
    }

    @Override
    public boolean onKeyDown(int keyCode, KeyEvent event) {
        // Back-button history navigation is intentionally disabled (kept for reference):
        // if ((keyCode == KeyEvent.KEYCODE_BACK) && browser.canGoBack()) {
        //
        // browser.goBack();
        //
        // return true;
        //
        // }
        return super.onKeyDown(keyCode, event);
    }

    // ////////// NAVIGATION BUTTONS /////////
    // hides the keyboard when the button is pressed
    // note: pressing "Go" without changing the URL does not trigger
    // onPageStarted, so whenever a URL is loaded — even the one currently
    // shown — the "stop" button is activated

    // NOTE(review): unused static stub shadowing WebView#canGoBack(); always false.
    private static boolean canGoBack() {
        // TODO Auto-generated method stub
        return false;
    }

    // Navigate back in the WebView history.
    public void anterior(View view) {
        browser.goBack();
    }

    // Navigate forward in the WebView history.
    public void siguiente(View view) {
        browser.goForward();
    }

    // Stop the current page load.
    public void detener(View view) {
        browser.stopLoading();
    }
} | apache-2.0 |
VahidN/MvcPlugin | MvcPluginMasterApp/Properties/AssemblyInfo.cs | 1372 | using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("MvcPluginMasterApp")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("MvcPluginMasterApp")]
[assembly: AssemblyCopyright("Copyright © 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("0de309dd-8b5b-42e1-829a-fcb2edc6ee47")]
// Version information for an assembly consists of the following four values:
//
// Major Version
// Minor Version
// Build Number
// Revision
//
// You can specify all the values or you can default the Revision and Build Numbers
// by using the '*' as shown below:
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
| apache-2.0 |
pietrog/astre | public/app.js | 1428 | (function(){
'use strict';
// Declare app level module which depends on views, and components
angular.module('astre', ['ui.router',
'LocalStorageModule',
'pietro.authentication',
'admin']);
angular.module('astre')
.config(config);
//function authenticationInterceptor(
function config($stateProvider, $urlRouterProvider, $httpProvider, localStorageServiceProvider ){
$urlRouterProvider.otherwise("/home/accueil");
localStorageServiceProvider.setPrefix('astre');
localStorageServiceProvider.setStorageType('windowStorage');
//add interceptor for authentication management
$httpProvider.interceptors.push('AuthenticationInterceptorFactory');
$stateProvider
.state("root", {
url: '',
abstract: true,
views: {
"header": { templateUrl: "views/header.html" },
"loginPart":{
templateUrl: "components/authentication/views/login.html",
controller: "UserLoginCtrl",
controllerAs: "loginCtrl"
}
}
})
.state("root.members", {
url: "/membres",
views: {
"mainview@": { templateUrl: "views/members.html" }
}
})
.state("root.contact", {
url: "/contact",
views: {
"mainview@": { templateUrl: "components/astre/views/contact.html" }
}
})
.state("root.links", {
url: "/links",
views: {
"mainview@": { templateUrl: "views/enconstruction.html" }
}
})
}
})();
| apache-2.0 |
zpao/buck | test/com/facebook/buck/features/apple/projectV2/BuildConfigurationTest.java | 10856 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.features.apple.projectV2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import com.facebook.buck.apple.AppleLibraryBuilder;
import com.facebook.buck.core.cell.Cell;
import com.facebook.buck.core.cell.TestCellBuilder;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.core.model.BuildTargetFactory;
import com.facebook.buck.core.model.impl.BuildTargetPaths;
import com.facebook.buck.core.model.targetgraph.TargetNode;
import com.facebook.buck.cxx.CxxLibraryBuilder;
import com.facebook.buck.cxx.toolchain.CxxPlatformUtils;
import com.facebook.buck.features.halide.HalideLibraryBuilder;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.facebook.buck.io.filesystem.impl.FakeProjectFilesystem;
import com.facebook.buck.util.environment.Platform;
import com.facebook.buck.util.timing.DefaultClock;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedMap;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileTime;
import java.util.Map;
import java.util.Optional;
import org.junit.Before;
import org.junit.Test;
/**
 * Unit tests for {@link BuildConfiguration}: generation of the
 * Debug/Profile/Release Xcode build configurations for Apple/Halide/C++
 * target nodes, merging of build-setting layers, xcconfig path computation,
 * and idempotent writing of xcconfig files.
 */
public class BuildConfigurationTest {

    private ProjectFilesystem projectFilesystem;
    private Cell cell;
    private BuildTarget fooBuildTarget;

    @Before
    public void setUp() {
        // These tests rely on POSIX filesystem behavior.
        assumeTrue(Platform.detect() == Platform.MACOS || Platform.detect() == Platform.LINUX);

        projectFilesystem = new FakeProjectFilesystem(new DefaultClock());
        cell = (new TestCellBuilder()).setFilesystem(projectFilesystem).build();
        fooBuildTarget = BuildTargetFactory.newInstance("//bar:foo");
    }

    @Test
    public void testWriteBuildConfigurationsForTarget() throws IOException {
        TargetNode fooTargetNode = AppleLibraryBuilder.createBuilder(fooBuildTarget).build();

        XCodeNativeTargetAttributes.Builder nativeTargetAttributes =
            XCodeNativeTargetAttributes.builder()
                .setAppleConfig(AppleProjectHelper.createDefaultAppleConfig(projectFilesystem));

        ImmutableSet.Builder<String> targetConfigNamesBuilder = ImmutableSet.builder();
        ImmutableSet.Builder<Path> xcconfigPathsBuilder = ImmutableSet.builder();

        ImmutableMap<String, String> overrideBuildSettings =
            ImmutableMap.<String, String>builder().put("cxxFlag", "override").build();
        ImmutableMap<String, String> buckXcodeBuildSettings =
            ImmutableMap.<String, String>builder().put("REPO_ROOT", "/this_is_your_repo").build();
        ImmutableMap<String, String> appendBuildSettings =
            ImmutableMap.<String, String>builder().put("cxxFlag", "append").build();

        BuildConfiguration.writeBuildConfigurationsForTarget(
            fooTargetNode,
            fooBuildTarget,
            CxxPlatformUtils.DEFAULT_PLATFORM,
            nativeTargetAttributes,
            overrideBuildSettings,
            buckXcodeBuildSettings,
            appendBuildSettings,
            projectFilesystem,
            false,
            targetConfigNamesBuilder,
            xcconfigPathsBuilder);

        // One xcconfig per standard configuration (Debug/Profile/Release).
        assertEquals(3, nativeTargetAttributes.build().xcconfigs().size());

        assertEquals(
            ImmutableSet.of(
                BuildConfiguration.DEBUG_BUILD_CONFIGURATION_NAME,
                BuildConfiguration.PROFILE_BUILD_CONFIGURATION_NAME,
                BuildConfiguration.RELEASE_BUILD_CONFIGURATION_NAME),
            targetConfigNamesBuilder.build());

        // Every reported xcconfig path must exist on disk.
        for (Path xcconfigPath : xcconfigPathsBuilder.build()) {
            assertTrue(
                projectFilesystem.exists(projectFilesystem.getRootPath().relativize(xcconfigPath)));
        }
    }

    @Test
    public void testGetBuildConfigurationsForAppleTargetNode() {
        TargetNode fooTargetNode = AppleLibraryBuilder.createBuilder(fooBuildTarget).build();
        ImmutableSortedMap<String, ImmutableMap<String, String>> buildConfigs =
            BuildConfiguration.getBuildConfigurationsForTargetNode(fooTargetNode);
        verifyExpectedBuildConfigurationsExist(buildConfigs, Optional.empty());
    }

    @Test
    public void testGetBuildConfigurationsForAppleTargetNodeHasTargetInlineConfig() {
        // Inline config declared on the target must be preserved alongside
        // the default configurations.
        ImmutableSortedMap.Builder<String, ImmutableMap<String, String>> testConfigsBuilder =
            ImmutableSortedMap.naturalOrder();
        ImmutableMap<String, String> debugConfig =
            ImmutableMap.<String, String>builder().put("someKey", "someValue").build();
        testConfigsBuilder.put(BuildConfiguration.DEBUG_BUILD_CONFIGURATION_NAME, debugConfig);

        TargetNode fooTargetNode =
            AppleLibraryBuilder.createBuilder(fooBuildTarget)
                .setConfigs(testConfigsBuilder.build())
                .build();

        ImmutableSortedMap<String, ImmutableMap<String, String>> buildConfigs =
            BuildConfiguration.getBuildConfigurationsForTargetNode(fooTargetNode);
        verifyExpectedBuildConfigurationsExist(buildConfigs, Optional.empty());
        verifyBuildConfigurationExists(
            buildConfigs, BuildConfiguration.DEBUG_BUILD_CONFIGURATION_NAME, debugConfig);
    }

    @Test
    public void testGetBuildConfigurationsForHalideTargetNode() throws IOException {
        TargetNode fooTargetNode = new HalideLibraryBuilder(fooBuildTarget).build();
        ImmutableSortedMap<String, ImmutableMap<String, String>> buildConfigs =
            BuildConfiguration.getBuildConfigurationsForTargetNode(fooTargetNode);
        verifyExpectedBuildConfigurationsExist(buildConfigs, Optional.empty());
    }

    @Test
    public void testGetBuildConfigurationsForCxxTargetNode() {
        TargetNode fooTargetNode = new CxxLibraryBuilder(fooBuildTarget).build();
        ImmutableSortedMap<String, ImmutableMap<String, String>> buildConfigs =
            BuildConfiguration.getBuildConfigurationsForTargetNode(fooTargetNode);
        verifyExpectedBuildConfigurationsExist(buildConfigs, Optional.empty());
    }

    @Test
    public void testMergeSettings() {
        ImmutableMap<String, String> configSettings =
            ImmutableMap.<String, String>builder().put("baseFlag", "baseValue").build();
        ImmutableMap<String, String> cxxPlatformBuildSettings =
            ImmutableMap.<String, String>builder().put("cxxFlag", "original").build();
        ImmutableMap<String, String> overrideBuildSettings =
            ImmutableMap.<String, String>builder().put("cxxFlag", "override").build();
        ImmutableMap<String, String> buckXcodeBuildSettings =
            ImmutableMap.<String, String>builder().put("REPO_ROOT", "/this_is_your_repo").build();
        ImmutableMap<String, String> appendBuildSettings =
            ImmutableMap.<String, String>builder().put("cxxFlag", "append").build();

        ImmutableSortedMap<String, String> mergedSettings =
            BuildConfiguration.mergeBuildSettings(
                configSettings,
                cxxPlatformBuildSettings,
                overrideBuildSettings,
                buckXcodeBuildSettings,
                appendBuildSettings);

        // Append settings win over overrides/platform and are expressed via
        // $(inherited); buck settings and base flags pass straight through.
        assertEquals(mergedSettings.get("REPO_ROOT"), "/this_is_your_repo");
        assertEquals(mergedSettings.get("baseFlag"), "baseValue");
        assertEquals(mergedSettings.get("cxxFlag"), "$(inherited) append");
    }

    @Test
    public void testGetConfigurationXcconfigPath() {
        Path xcconfigPath =
            BuildConfiguration.getXcconfigPath(
                projectFilesystem, fooBuildTarget, BuildConfiguration.DEBUG_BUILD_CONFIGURATION_NAME);
        assertEquals(
            BuildTargetPaths.getGenPath(projectFilesystem, fooBuildTarget, "%s-Debug.xcconfig"),
            xcconfigPath);
    }

    @Test
    public void testWriteBuildConfiguration() throws IOException, InterruptedException {
        ImmutableSortedMap<String, String> config =
            ImmutableSortedMap.<String, String>naturalOrder().put("SOME_FLAG", "value").build();
        Path filePath = Paths.get("new-dir/test.xcconfig");

        BuildConfiguration.writeBuildConfiguration(projectFilesystem, filePath, config, false);
        Optional<String> fileContents = projectFilesystem.readFileIfItExists(filePath);
        assertEquals("SOME_FLAG = value\n", fileContents.get());

        // Verify that the same file is not rewritten (mtime unchanged).
        FileTime modifiedTime = projectFilesystem.getLastModifiedTime(filePath);
        BuildConfiguration.writeBuildConfiguration(projectFilesystem, filePath, config, false);
        FileTime newModifiedTime = projectFilesystem.getLastModifiedTime(filePath);
        assertEquals(modifiedTime, newModifiedTime);

        // Ensure the next write lands at a distinguishable timestamp.
        Thread.sleep(100);

        // Verify that changed contents ARE rewritten (mtime advances).
        config =
            ImmutableSortedMap.<String, String>naturalOrder()
                .put("SOME_FLAG", "value new y'all")
                .build();
        BuildConfiguration.writeBuildConfiguration(projectFilesystem, filePath, config, false);
        newModifiedTime = projectFilesystem.getLastModifiedTime(filePath);
        assertNotEquals(modifiedTime, newModifiedTime);
    }

    // Asserts that the three standard configurations exist, plus any expected
    // additional configurations with exactly their expected settings.
    private void verifyExpectedBuildConfigurationsExist(
        ImmutableSortedMap<String, ImmutableMap<String, String>> buildConfigs,
        Optional<ImmutableSortedMap<String, ImmutableMap<String, String>>>
            expectedAdditionalConfigs) {
        ImmutableSortedMap<String, ImmutableMap<String, String>> additionalConfigs =
            ImmutableSortedMap.of();
        if (expectedAdditionalConfigs.isPresent()) {
            additionalConfigs = expectedAdditionalConfigs.get();
        }

        assertEquals(3 + additionalConfigs.size(), buildConfigs.size());
        assertTrue(buildConfigs.keySet().contains(BuildConfiguration.DEBUG_BUILD_CONFIGURATION_NAME));
        assertTrue(buildConfigs.keySet().contains(BuildConfiguration.PROFILE_BUILD_CONFIGURATION_NAME));
        assertTrue(buildConfigs.keySet().contains(BuildConfiguration.RELEASE_BUILD_CONFIGURATION_NAME));

        for (Map.Entry<String, ImmutableMap<String, String>> entry : additionalConfigs.entrySet()) {
            assertTrue(buildConfigs.keySet().contains(entry.getKey()));
            assertEquals(entry.getValue(), buildConfigs.get(entry.getKey()));
        }
    }

    // Asserts that a named configuration exists with exactly the given settings.
    private void verifyBuildConfigurationExists(
        ImmutableSortedMap<String, ImmutableMap<String, String>> buildConfigs,
        String configName,
        ImmutableMap<String, String> buildSettings) {
        assertTrue(buildConfigs.keySet().contains(configName));
        assertEquals(buildConfigs.get(configName), buildSettings);
    }
}
| apache-2.0 |
prebid/prebid-server | openrtb_ext/imp_jixie.go | 218 | package openrtb_ext
// ExtImpJixie holds the Jixie-specific impression parameters carried in the
// bid request extension. Field semantics beyond the JSON mapping are defined
// by the Jixie bidder.
type ExtImpJixie struct {
	// Unit is the Jixie ad unit identifier; it has no omitempty, so it is
	// always serialized (an empty value serializes as "").
	Unit string `json:"unit"`
	// AccountId is the account identifier; omitted from JSON when empty.
	AccountId string `json:"accountid,omitempty"`
	// JxProp1 and JxProp2 are optional pass-through properties; omitted when empty.
	JxProp1 string `json:"jxprop1,omitempty"`
	JxProp2 string `json:"jxprop2,omitempty"`
}
| apache-2.0 |
markcowl/azure-sdk-for-net | sdk/synapse/Microsoft.Azure.Management.Synapse/src/Generated/Models/BigDataPoolResourceInfo.cs | 7954 | // <auto-generated>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
// </auto-generated>
namespace Microsoft.Azure.Management.Synapse.Models
{
using Microsoft.Rest;
using Microsoft.Rest.Serialization;
using Newtonsoft.Json;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
/// <summary>
/// Big Data pool
/// </summary>
/// <remarks>
/// A Big Data pool
/// </remarks>
    [Rest.Serialization.JsonTransformation]
    public partial class BigDataPoolResourceInfo : TrackedResource
    {
        /// <summary>
        /// Initializes a new instance of the BigDataPoolResourceInfo class.
        /// </summary>
        public BigDataPoolResourceInfo()
        {
            CustomInit();
        }
        /// <summary>
        /// Initializes a new instance of the BigDataPoolResourceInfo class.
        /// </summary>
        /// <param name="location">The geo-location where the resource
        /// lives</param>
        /// <param name="id">Fully qualified resource Id for the resource. Ex -
        /// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}</param>
        /// <param name="name">The name of the resource</param>
        /// <param name="type">The type of the resource. Ex-
        /// Microsoft.Compute/virtualMachines or
        /// Microsoft.Storage/storageAccounts.</param>
        /// <param name="tags">Resource tags.</param>
        /// <param name="provisioningState">The state of the Big Data
        /// pool.</param>
        /// <param name="autoScale">Auto-scaling properties</param>
        /// <param name="creationDate">The time when the Big Data pool was
        /// created.</param>
        /// <param name="autoPause">Auto-pausing properties</param>
        /// <param name="isComputeIsolationEnabled">Whether compute isolation
        /// is required or not.</param>
        /// <param name="sparkEventsFolder">The Spark events folder</param>
        /// <param name="nodeCount">The number of nodes in the Big Data
        /// pool.</param>
        /// <param name="libraryRequirements">Library version
        /// requirements</param>
        /// <param name="sparkVersion">The Apache Spark version.</param>
        /// <param name="defaultSparkLogFolder">The default folder where Spark
        /// logs will be written.</param>
        /// <param name="nodeSize">The level of compute power that each node in
        /// the Big Data pool has. Possible values include: 'None', 'Small',
        /// 'Medium', 'Large', 'XLarge', 'XXLarge'</param>
        /// <param name="nodeSizeFamily">The kind of nodes that the Big Data
        /// pool provides. Possible values include: 'None',
        /// 'MemoryOptimized'</param>
        public BigDataPoolResourceInfo(string location, string id = default(string), string name = default(string), string type = default(string), IDictionary<string, string> tags = default(IDictionary<string, string>), string provisioningState = default(string), AutoScaleProperties autoScale = default(AutoScaleProperties), System.DateTime? creationDate = default(System.DateTime?), AutoPauseProperties autoPause = default(AutoPauseProperties), bool? isComputeIsolationEnabled = default(bool?), string sparkEventsFolder = default(string), int? nodeCount = default(int?), LibraryRequirements libraryRequirements = default(LibraryRequirements), string sparkVersion = default(string), string defaultSparkLogFolder = default(string), string nodeSize = default(string), string nodeSizeFamily = default(string))
            : base(location, id, name, type, tags)
        {
            ProvisioningState = provisioningState;
            AutoScale = autoScale;
            CreationDate = creationDate;
            AutoPause = autoPause;
            IsComputeIsolationEnabled = isComputeIsolationEnabled;
            SparkEventsFolder = sparkEventsFolder;
            NodeCount = nodeCount;
            LibraryRequirements = libraryRequirements;
            SparkVersion = sparkVersion;
            DefaultSparkLogFolder = defaultSparkLogFolder;
            NodeSize = nodeSize;
            NodeSizeFamily = nodeSizeFamily;
            CustomInit();
        }
        /// <summary>
        /// An initialization method that performs custom operations like setting defaults
        /// </summary>
        partial void CustomInit();
        /// <summary>
        /// Gets or sets the state of the Big Data pool.
        /// </summary>
        [JsonProperty(PropertyName = "properties.provisioningState")]
        public string ProvisioningState { get; set; }
        /// <summary>
        /// Gets or sets auto-scaling properties
        /// </summary>
        [JsonProperty(PropertyName = "properties.autoScale")]
        public AutoScaleProperties AutoScale { get; set; }
        /// <summary>
        /// Gets or sets the time when the Big Data pool was created.
        /// </summary>
        [JsonProperty(PropertyName = "properties.creationDate")]
        public System.DateTime? CreationDate { get; set; }
        /// <summary>
        /// Gets or sets auto-pausing properties
        /// </summary>
        [JsonProperty(PropertyName = "properties.autoPause")]
        public AutoPauseProperties AutoPause { get; set; }
        /// <summary>
        /// Gets or sets whether compute isolation is required or not.
        /// </summary>
        [JsonProperty(PropertyName = "properties.isComputeIsolationEnabled")]
        public bool? IsComputeIsolationEnabled { get; set; }
        /// <summary>
        /// Gets or sets the Spark events folder
        /// </summary>
        [JsonProperty(PropertyName = "properties.sparkEventsFolder")]
        public string SparkEventsFolder { get; set; }
        /// <summary>
        /// Gets or sets the number of nodes in the Big Data pool.
        /// </summary>
        [JsonProperty(PropertyName = "properties.nodeCount")]
        public int? NodeCount { get; set; }
        /// <summary>
        /// Gets or sets library version requirements
        /// </summary>
        [JsonProperty(PropertyName = "properties.libraryRequirements")]
        public LibraryRequirements LibraryRequirements { get; set; }
        /// <summary>
        /// Gets or sets the Apache Spark version.
        /// </summary>
        [JsonProperty(PropertyName = "properties.sparkVersion")]
        public string SparkVersion { get; set; }
        /// <summary>
        /// Gets or sets the default folder where Spark logs will be written.
        /// </summary>
        [JsonProperty(PropertyName = "properties.defaultSparkLogFolder")]
        public string DefaultSparkLogFolder { get; set; }
        /// <summary>
        /// Gets or sets the level of compute power that each node in the Big
        /// Data pool has. Possible values include: 'None', 'Small', 'Medium',
        /// 'Large', 'XLarge', 'XXLarge'
        /// </summary>
        [JsonProperty(PropertyName = "properties.nodeSize")]
        public string NodeSize { get; set; }
        /// <summary>
        /// Gets or sets the kind of nodes that the Big Data pool provides.
        /// Possible values include: 'None', 'MemoryOptimized'
        /// </summary>
        [JsonProperty(PropertyName = "properties.nodeSizeFamily")]
        public string NodeSizeFamily { get; set; }
        /// <summary>
        /// Validate the object.
        /// </summary>
        /// <exception cref="ValidationException">
        /// Thrown if validation fails
        /// </exception>
        public override void Validate()
        {
            // This generated model adds no constraints of its own beyond what
            // TrackedResource.Validate() enforces.
            base.Validate();
        }
    }
}
| apache-2.0 |
xiekeyang/containerd | services/images/helpers.go | 2169 | package images
import (
imagesapi "github.com/containerd/containerd/api/services/images"
"github.com/containerd/containerd/api/types/descriptor"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// imagesToProto converts a slice of metadata images into their API
// (protobuf) representation.
func imagesToProto(images []images.Image) []imagesapi.Image {
	// Preallocate to the known length to avoid repeated append growth.
	imagespb := make([]imagesapi.Image, 0, len(images))
	for _, image := range images {
		imagespb = append(imagespb, imageToProto(&image))
	}
	return imagespb
}
// imagesFromProto converts a slice of API (protobuf) images into their
// metadata representation.
func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
	// Named "out" rather than "images" so the local does not shadow the
	// imported images package; also preallocated to the known length.
	out := make([]images.Image, 0, len(imagespb))
	for _, image := range imagespb {
		out = append(out, imageFromProto(&image))
	}
	return out
}
// imageToProto converts a single metadata image to its API representation.
func imageToProto(image *images.Image) imagesapi.Image {
	var pb imagesapi.Image
	pb.Name = image.Name
	pb.Target = descToProto(&image.Target)
	return pb
}
// imageFromProto converts a single API image to its metadata representation.
func imageFromProto(imagepb *imagesapi.Image) images.Image {
	var img images.Image
	img.Name = imagepb.Name
	img.Target = descFromProto(&imagepb.Target)
	return img
}
// descFromProto converts a protobuf descriptor into an OCI descriptor.
func descFromProto(desc *descriptor.Descriptor) ocispec.Descriptor {
	var out ocispec.Descriptor
	out.MediaType = desc.MediaType
	out.Size = desc.Size_
	out.Digest = desc.Digest
	return out
}
// descToProto converts an OCI descriptor into its protobuf representation.
func descToProto(desc *ocispec.Descriptor) descriptor.Descriptor {
	var out descriptor.Descriptor
	out.MediaType = desc.MediaType
	out.Size_ = desc.Size
	out.Digest = desc.Digest
	return out
}
// rewriteGRPCError maps well-known gRPC status codes back onto the metadata
// package's sentinel errors; any other error (or nil) passes through unchanged.
func rewriteGRPCError(err error) error {
	if err == nil {
		return nil
	}
	code := grpc.Code(errors.Cause(err))
	if code == codes.AlreadyExists {
		return metadata.ErrExists
	}
	if code == codes.NotFound {
		return metadata.ErrNotFound
	}
	return err
}
// mapGRPCError translates store-level errors (metadata / namespace) into gRPC
// status errors suitable for returning to API clients; unrecognized errors
// are passed through unchanged.
func mapGRPCError(err error, id string) error {
	if metadata.IsNotFound(err) {
		return grpc.Errorf(codes.NotFound, "image %v not found", id)
	}
	if metadata.IsExists(err) {
		return grpc.Errorf(codes.AlreadyExists, "image %v already exists", id)
	}
	if namespaces.IsNamespaceRequired(err) {
		return grpc.Errorf(codes.InvalidArgument, "namespace required, please set %q header", namespaces.GRPCHeader)
	}
	return err
}
| apache-2.0 |
blackmad/snippets | logic/user_logic.py | 1038 | from app import db
from models.group import Group
from models.user import User
from models.snippet import Snippet
from models.link_tables import *
from datetime import date, datetime, timedelta
class UserLogic():
    """Static helpers for user-related persistence on the shared SQLAlchemy session."""

    @staticmethod
    def find_user_by_email(email):
        """Return the single ``User`` whose ``id`` equals ``email``.

        NOTE(review): this filters on ``User.id`` — presumably the user's
        primary key *is* the email address; confirm against the User model.
        ``.one()`` raises if zero or more than one row matches.
        """
        return db.session.query(User).filter_by(id=email).one()

    @staticmethod
    def subscribe_to_user(from_user, to_user):
        """Create (or fetch the existing) subscription from ``from_user`` to
        ``to_user``, commit, and return it."""
        sub = UserSubscription.get_or_create(
            db.session,
            from_user_id = from_user.id,
            to_user_id = to_user.id
        )
        db.session.commit()
        return sub

    @staticmethod
    def unsubscribe_to_user(from_user, to_user):
        """Delete any subscription from ``from_user`` to ``to_user`` and commit."""
        db.session.query(UserSubscription).filter_by(
            from_user_id = from_user.id,
            to_user_id = to_user.id
        ).delete()
        db.session.commit()

    @staticmethod
    def get_last_snippets(user, max_weeks=10):
        """Return a query for ``user``'s recent snippets, newest first.

        NOTE(review): the cutoff is ``timedelta(days=max_weeks + 1)`` — days,
        not weeks — so the window is far shorter than the parameter name
        suggests. Looks like ``weeks=max_weeks`` was intended; confirm before
        changing. The query is returned unexecuted.
        """
        d = date.today() - timedelta(days=max_weeks+1)
        return db.session.query(Snippet).filter(
            Snippet.created_at >= d,
            Snippet.user_id == user.id
        ).order_by(Snippet.created_at.desc())
vatbub/hangman-solver | src/main/java/stats/HangmanStats.java | 9907 | package stats;
/*-
* #%L
* Hangman Solver
* %%
* Copyright (C) 2016 Frederik Kammel
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import com.github.vatbub.common.core.Prefs;
import com.github.vatbub.common.core.logging.FOKLogger;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.Updates;
import languages.Language;
import languages.TabFile;
import org.bson.Document;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.logging.Level;
/**
* This class is intended to count the words used in the solver in a
* <a href="https://www.mongodb.com/">MongoDB</a> for the social experiment.
*
* @author Frederik Kammel
*/
public class HangmanStats {
    /**
     * A {@link List} that contains all words that were already submitted. This
     * ensures that the word counts in the
     * <a href="https://www.mongodb.com/">MongoDB</a> are correct.
     */
    private static final List<String> alreadySubmittedWordsInThisSession = new ArrayList<>();
    /**
     * The current upload queue.
     */
    private static final LinkedBlockingQueue<Document> docQueue = new LinkedBlockingQueue<>();
    /**
     * This object is used to save a copy of the upload queue on the disc to
     * keep it even if the app is relaunched.
     */
    private static final Prefs preferences = initPrefs();
    /**
     * The pref key where the offline copy of the upload queue is saved.
     */
    private static final String persistentDocQueueKey = "docQueue";
    /**
     * This thread runs in the background and uploads all submitted words. This
     * concept ensures that all words are submitted even if the player was
     * offline while playing.
     */
    public static final Thread uploadThread = new Thread() {
        private boolean interrupted = false;
        @Override
        public void run() {
            this.setName("uploadThread");
            FOKLogger.info(HangmanStats.class.getName(), "Starting " + this.getName() + "...");
            readDocQueueFromPreferences();
            // NOTE(review): this loop never sleeps or blocks — when the queue is
            // empty or MongoDB is unreachable it busy-spins a CPU core. Consider
            // blocking on docQueue.take() or adding a backoff delay.
            while (!interrupted) {
                try {
                    if (MongoSetup.isReachable()) {
                        if (!docQueue.isEmpty()) {
                            // The document is removed from the queue *before* the
                            // transfer; if the transfer below throws, this document
                            // is not re-queued (see the catch block's message).
                            Document newDoc = docQueue.remove();
                            String word = newDoc.getString("word");
                            String langCode = newDoc.getString("lang");
                            MongoCollection<Document> coll = MongoSetup.getWordsUsedCollection();
                            Document doc = coll
                                    .find(Filters.and(Filters.eq("word", word), Filters.eq("lang", langCode))).first();
                            FOKLogger.info(HangmanStats.class.getName(), "Transferring word " + word + "...");
                            if (doc == null) {
                                // word never added prior to this
                                doc = new Document("word", word).append("lang", langCode).append("count", 1);
                                coll.insertOne(doc);
                            } else {
                                // word already known in the database
                                coll.updateOne(Filters.and(Filters.eq("word", word), Filters.eq("lang", langCode)),
                                        Updates.inc("count", 1));
                            }
                        }
                    }
                } catch (Exception e) {
                    // NOTE(review): goes to System.err instead of FOKLogger, and the
                    // in-flight document removed above is actually dropped here —
                    // only documents still *in* the queue are saved to disk on exit.
                    System.err.println(
                            "Something went wrong while transferring a document to the MongoDB but don't worry, the document was probably saved on your hard drive and will be transferred after launching the app again.");
                }
            }
        }
        @Override
        public void interrupt() {
            // NOTE(review): does not call super.interrupt(), so a thread blocked in
            // I/O is not actually interrupted; shutdown relies on the flag being
            // observed by the (non-blocking) loop above.
            interrupted = true;
            saveDocQueueToPreferences();
            FOKLogger.info(HangmanStats.class.getName(), "Shutting " + this.getName() + " down...");
        }
    };
    /**
     * Takes the content of the docQueue and saves its content using the Prefs
     * class from the common project. Drains the in-memory queue in the process;
     * entries are serialized one JSON document per line.
     */
    private static void saveDocQueueToPreferences() {
        if (preferences != null) {
            FOKLogger.info(HangmanStats.class.getName(), "Saving docQueue to disk...");
            StringBuilder res = new StringBuilder();
            while (!docQueue.isEmpty()) {
                Document doc = docQueue.remove();
                res.append(doc.toJson());
                if (!docQueue.isEmpty()) {
                    // Still other objects left so add a line break
                    res.append("\n");
                }
            }
            preferences.setPreference(persistentDocQueueKey, res.toString());
        } else {
            FOKLogger.info(HangmanStats.class.getName(), "Cannot save docQueue to disk as preferences could not be initialized.");
        }
    }
    /**
     * Reads the persistent copy of the docQueue if one exists and merges it
     * with the docQueue in memory. Documents already present in the in-memory
     * queue are not added twice.
     */
    private static void readDocQueueFromPreferences() {
        if (preferences != null) {
            FOKLogger.info(HangmanStats.class.getName(), "Reading docQueue from disk...");
            String persStr = preferences.getPreference(persistentDocQueueKey, "");
            if (!persStr.equals("")) {
                String[] docs = persStr.split("\n");
                for (String newDoc : docs) {
                    Document doc = Document.parse(newDoc);
                    if (!docQueue.contains(doc)) {
                        try {
                            docQueue.put(doc);
                        } catch (InterruptedException e) {
                            FOKLogger.log(HangmanStats.class.getName(), Level.SEVERE, "An error occurred", e);
                        }
                    }
                }
            }
        } else {
            FOKLogger.info(HangmanStats.class.getName(), "Cannot read docQueue to disk as preferences could not be initialized.");
        }
    }
    /**
     * Creates the {@link Prefs} instance backing the offline queue copy, or
     * returns {@code null} if it cannot be created (offline cache disabled).
     */
    private static Prefs initPrefs() {
        Prefs res = null;
        try {
            res = new Prefs(HangmanStats.class.getName());
        } catch (Exception e) {
            // Disable offline cache of stats
        }
        return res;
    }
    /**
     * Adds a word to the mongodb database. The input is split on spaces and
     * each token is queued individually; tokens already submitted in this
     * session are skipped. Starts the upload thread on first use.
     *
     * @param word The word to be added
     * @param lang The Language the user is currently playing in
     */
    public static void addWordToDatabase(String word, Language lang) {
        if (!uploadThread.isAlive()) {
            uploadThread.start();
        }
        String[] words = word.split(" ");
        for (String w : words) {
            if (!alreadySubmittedWordsInThisSession.contains(w)) {
                // word not submitted yet
                alreadySubmittedWordsInThisSession.add(w);
                FOKLogger.info(HangmanStats.class.getName(), "Submitting word '" + w + "' to MongoDB...");
                Document doc = new Document("word", w).append("lang", lang.getLanguageCode()).append("count", 1);
                try {
                    docQueue.put(doc);
                } catch (InterruptedException e) {
                    FOKLogger.log(HangmanStats.class.getName(), Level.SEVERE, "An error occurred", e);
                }
                FOKLogger.info(HangmanStats.class.getName(), "Submission done.");
            }
        }
    }
    /**
     * Merges the database entries with the given dictionary to enhance the
     * dictionary. After merging the online database and the local one, the
     * local copy will be sorted by the amount that users used a word so that
     * most used words will be preffered.
     *
     * @param dictionary The Dictionary to be merged
     * @param lang The language requested
     */
    public static void mergeWithDictionary(TabFile dictionary, Language lang) {
        FOKLogger.info(HangmanStats.class.getName(), "Merging offline dictionary for language " + lang.getLanguageCode() + " with online database...");
        MongoCollection<Document> coll = MongoSetup.getWordsUsedCollection();
        for (Document doc : coll.find(Filters.eq("lang", lang.getLanguageCode()))) {
            String word = doc.get("word").toString();
            int count = doc.getInteger("count");
            List<Integer> indexList = dictionary.indexOfIgnoreCase(word, 2);
            if (indexList.isEmpty()) {
                // Word not yet present in dictionary so add it
                dictionary.addRow(new String[]{"fromOnlineDatabase", lang.getLanguageCode() + ":lemma", word,
                        Integer.toString(count)});
            } else {
                dictionary.setValueAt(Integer.toString(count), indexList, 3);
            }
        }
        FOKLogger.info(HangmanStats.class.getName(), "Merge finished, sorting TabFile now...");
        dictionary.sortDescending(3);
        FOKLogger.info(HangmanStats.class.getName(), "Sorting finished.");
    }
}
| apache-2.0 |
Whiley/WhileyTheoremProver | src/main/java/wyal/WyalMain.java | 1904 | // Copyright 2011 The Whiley Project Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wyal;
import java.io.IOException;
import wyal.io.WyalFileLexer;
import wyal.io.WyalFileParser;
import wyal.io.WyalFilePrinter;
import wyal.lang.WyalFile;
import wyal.lang.WyalFile;
import wyfs.lang.Content;
import wyfs.lang.Content.Type;
import wyfs.lang.Path;
import wyfs.lang.Path.Entry;
import wyfs.util.DirectoryRoot;
import wyfs.util.Trie;
/**
 * Command-line entry point which reads a single ".wyal" source file (named by
 * the first argument, relative to the current directory), parses it into an
 * abstract syntax tree, and pretty-prints that tree to standard output.
 */
public class WyalMain {
	/** Registry associating the ".wyal" suffix with the WyalFile content type. */
	public static Content.Registry registry = new Content.Registry() {
		@Override
		public void associate(Entry entry) {
			if (entry.suffix().equals("wyal")) {
				entry.associate(WyalFile.ContentType, null);
			}
		}

		@Override
		public String suffix(Type<?> contentType) {
			return contentType.getSuffix();
		}

		@Override
		public Type<?> contentType(String suffix) {
			// Reverse lookup is not supported by this registry.
			return null;
		}
	};

	public static void main(String[] args) throws IOException {
		// Locate the requested entry within a project rooted at the working directory.
		DirectoryRoot root = new DirectoryRoot(".", registry);
		Path.ID moduleID = Trie.fromString(args[0]);
		Path.Entry sourceEntry = root.get(moduleID, WyalFile.ContentType);
		// Tokenise and parse the source file.
		WyalFileLexer lexer = new WyalFileLexer(sourceEntry);
		WyalFileParser parser = new WyalFileParser(new WyalFile(sourceEntry), lexer.scan());
		WyalFile parsed = parser.read();
		// Render the parsed file to stdout.
		new WyalFilePrinter(System.out).write(parsed);
	}
}
| apache-2.0 |
aspectran/aspectran | rss-lettuce/src/main/java/com/aspectran/core/component/session/redis/lettuce/cluster/ClusterLettuceSessionStore.java | 3433 | /*
* Copyright (c) 2008-2021 The Aspectran Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aspectran.core.component.session.redis.lettuce.cluster;
import com.aspectran.core.component.session.SessionData;
import com.aspectran.core.component.session.redis.lettuce.AbstractLettuceSessionStore;
import com.aspectran.core.component.session.redis.lettuce.ConnectionPool;
import com.aspectran.core.component.session.redis.lettuce.SessionDataCodec;
import io.lettuce.core.RedisConnectionException;
import io.lettuce.core.ScanIterator;
import io.lettuce.core.cluster.api.StatefulRedisClusterConnection;
import io.lettuce.core.cluster.api.sync.RedisClusterCommands;
import java.util.function.Consumer;
import java.util.function.Function;
/**
* A Redis-based session store using Lettuce as the client.
*
* <p>Created: 2019/12/06</p>
*
* @since 6.6.0
*/
public class ClusterLettuceSessionStore extends AbstractLettuceSessionStore {

    /** Pool of cluster connections; codec-initialized in {@link #doInitialize()}. */
    private final ConnectionPool<StatefulRedisClusterConnection<String, SessionData>> pool;

    public ClusterLettuceSessionStore(ConnectionPool<StatefulRedisClusterConnection<String, SessionData>> pool) {
        this.pool = pool;
    }

    @Override
    protected void doInitialize() throws Exception {
        // The codec is told which attributes must never be serialized to Redis.
        SessionDataCodec codec = new SessionDataCodec(getNonPersistentAttributes());
        pool.initialize(codec);
    }

    @Override
    protected void doDestroy() throws Exception {
        pool.destroy();
    }

    /** Borrows a connection from the pool, rewrapping pool failures as connection errors. */
    private StatefulRedisClusterConnection<String, SessionData> getConnection() {
        try {
            return pool.getConnection();
        } catch (Exception e) {
            throw RedisConnectionException.create(e);
        }
    }

    /**
     * Runs {@code func} against the synchronous command API of a pooled
     * connection, returning the connection when done.
     */
    <R> R sync(Function<RedisClusterCommands<String, SessionData>, R> func) {
        try (StatefulRedisClusterConnection<String, SessionData> conn = getConnection()) {
            return func.apply(conn.sync());
        }
    }

    @Override
    public void scan(Consumer<SessionData> func) {
        // Iterates every key via SCAN and loads each value as session data.
        // NOTE(review): assumes this Redis keyspace holds only session entries;
        // confirm nothing else shares the database/prefix.
        sync(c -> {
            ScanIterator<String> scanIterator = ScanIterator.scan(c);
            while (scanIterator.hasNext()) {
                String key = scanIterator.next();
                SessionData data = c.get(key);
                func.accept(data);
            }
            return null;
        });
    }

    @Override
    public SessionData load(String id) {
        return sync(c -> c.get(id));
    }

    @Override
    public boolean delete(String id) {
        return sync(c -> {
            Long deleted = c.del(id);
            return (deleted != null && deleted > 0L);
        });
    }

    @Override
    public boolean exists(String id) {
        // Loads the value and applies the expiry check rather than using EXISTS,
        // so an expired-but-present entry reports false.
        return sync(c -> {
            SessionData data = c.get(id);
            return checkExpiry(data);
        });
    }

    @Override
    public void doSave(String id, SessionData data) {
        sync(c -> c.set(id, data));
    }
}
| apache-2.0 |
twitter/twitter-cldr-rb | spec/shared/languages_spec.rb | 4216 | # encoding: UTF-8
# Copyright 2012 Twitter, Inc
# http://www.apache.org/licenses/LICENSE-2.0
require 'spec_helper'
# NOTE(review): match_normalized appears to be a custom matcher defined in the
# spec support code (not shown here); it presumably compares Unicode-normalized
# strings — confirm in spec_helper.
describe TwitterCldr::Shared::Languages do
  describe "#translate_language" do
    it "should translate a language from one locale to another" do
      expect(described_class.translate_language("Russian", :en, :es)).to match_normalized("ruso")
      expect(described_class.translate_language("ruso", :es, :en)).to match_normalized("Russian")
      expect(described_class.translate_language("Spanish", :en, :es)).to match_normalized("español")
      expect(described_class.translate_language("ruso", :es, :ru)).to match_normalized("русский")
    end
    it "should be capitalization agnostic" do
      expect(described_class.translate_language("russian", :en, :es)).to match_normalized("ruso")
      expect(described_class.translate_language("RUSSIAN", :en, :es)).to match_normalized("ruso")
    end
    it "defaults the destination language to English (or whatever the global locale is)" do
      expect(described_class.translate_language("Ruso", :es)).to match_normalized("Russian")
      expect(described_class.translate_language("русский", :ru)).to match_normalized("Russian")
    end
    it "defaults source and destination language to English if not given" do
      expect(described_class.translate_language("Russian")).to match_normalized("Russian")
      # NOTE(review): mutates the process-global locale without restoring it;
      # assumes spec_helper resets TwitterCldr.locale between examples — confirm.
      TwitterCldr.locale = :es
      expect(described_class.translate_language("Russian")).to match_normalized("ruso")
    end
    it "successfully translates locale codes that are and are not in the CLDR using the locale map" do
      expect(described_class.translate_language("Russian", :en, :'zh-cn')).to match_normalized("俄语")
      expect(described_class.translate_language("Russian", :en, :'zh')).to match_normalized("俄语")
    end
    it "should return nil if no translation was found" do
      expect(described_class.translate_language("Russian", :en, :blarg)).to eq(nil)
    end
  end
  describe "#from_code_for_locale" do
    it "should return the language in the correct locale for the given locale code (i.e. es in English should be Spanish)" do
      expect(described_class.from_code_for_locale(:es, :en)).to match_normalized("Spanish")
      expect(described_class.from_code_for_locale(:en, :es)).to match_normalized("inglés")
    end
  end
  describe "#from_code" do
    it "should return the language in the default locale for the given locale code" do
      expect(described_class.from_code(:es)).to match_normalized("Spanish")
      expect(described_class.from_code(:ru)).to match_normalized("Russian")
      # NOTE(review): relies on spec_helper restoring the global locale — confirm.
      TwitterCldr.locale = :es
      expect(described_class.from_code(:es)).to match_normalized("español")
    end
  end
  describe "#all_for" do
    it "should return a hash of all languages for the given language code" do
      langs = described_class.all_for(:es)
      expect(langs).to be_a(Hash)
      expect(langs[:ru]).to match_normalized("ruso")
    end
    it "should return an empty hash for an invalid language" do
      langs = described_class.all_for(:blarg)
      expect(langs).to eq({})
    end
  end
  describe "#all" do
    it "should use the default language to get the language hash" do
      langs = described_class.all
      expect(langs).to be_a(Hash)
      expect(langs[:ru]).to match_normalized("Russian")
      expect(langs[:de]).to match_normalized("German")
      TwitterCldr.locale = :es
      langs = described_class.all
      expect(langs).to be_a(Hash)
      expect(langs[:ru]).to match_normalized("ruso")
      expect(langs[:de]).to match_normalized("alemán")
    end
  end
  describe "#is_rtl?" do
    it "should return true for certain locales" do
      [:ar, :he, :ur, :fa].each do |locale|
        expect(described_class.is_rtl?(locale)).to eq(true)
      end
    end
    it "should return false for certain locales" do
      [:en, :es, :hu, :ja].each do |locale|
        expect(described_class.is_rtl?(locale)).to eq(false)
      end
    end
    it "should not raise errors for any locale" do
      TwitterCldr.supported_locales.each do |locale|
        expect { described_class.is_rtl?(locale) }.not_to raise_error
      end
    end
  end
end
| apache-2.0 |
paulstapleton/flowable-engine | modules/flowable-ui/flowable-ui-app/src/test/java/org/flowable/ui/application/FlowableUiIdmApplicationDefaultAuthenticationTest.java | 4553 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.ui.application;
import static org.assertj.core.api.Assertions.assertThat;
import org.flowable.idm.api.IdmIdentityService;
import org.flowable.idm.api.User;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.boot.test.web.client.TestRestTemplate;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
/**
* @author Filip Hrisafov
*/
@RunWith(SpringRunner.class)
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
@Import(FlowableUiIdmApplicationDefaultAuthenticationTest.TestBootstrapConfiguration.class)
public class FlowableUiIdmApplicationDefaultAuthenticationTest {

    @Autowired
    private TestRestTemplate restTemplate;

    @Test
    public void authenticate() {
        ResponseEntity<String> authenticationResponse = postAuthentication("my-admin", "my-pass");
        assertThat(authenticationResponse.getStatusCode()).as(authenticationResponse.toString()).isEqualTo(HttpStatus.OK);
    }

    @Test
    public void authenticateWithWrongPassword() {
        ResponseEntity<String> authenticationResponse = postAuthentication("my-admin", "wrong");
        assertThat(authenticationResponse.getStatusCode()).as(authenticationResponse.toString()).isEqualTo(HttpStatus.UNAUTHORIZED);
    }

    @Test
    public void authenticateWithNonExistingUser() {
        ResponseEntity<String> authenticationResponse = postAuthentication("non-existing", "wrong");
        assertThat(authenticationResponse.getStatusCode()).as(authenticationResponse.toString()).isEqualTo(HttpStatus.UNAUTHORIZED);
    }

    /**
     * Posts a form-encoded login request ({@code j_username}/{@code j_password})
     * to the authentication endpoint and returns the raw response. Extracted so
     * the three tests above share one copy of the request-building boilerplate.
     */
    private ResponseEntity<String> postAuthentication(String username, String password) {
        HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED);
        MultiValueMap<String, String> map = new LinkedMultiValueMap<>();
        map.add("j_username", username);
        map.add("j_password", password);
        return restTemplate.postForEntity("/app/authentication", new HttpEntity<>(map, headers), String.class);
    }

    @TestConfiguration
    static class TestBootstrapConfiguration {

        /** Ensures the "my-admin" test user exists before the tests run. */
        @Bean
        public CommandLineRunner initTestUsers(IdmIdentityService idmIdentityService) {
            return args -> {
                User testUser = idmIdentityService.createUserQuery().userId("my-admin").singleResult();
                if (testUser == null) {
                    createTestUser(idmIdentityService);
                }
            };
        }

        private void createTestUser(IdmIdentityService idmIdentityService) {
            User user = idmIdentityService.newUser("my-admin");
            user.setPassword("my-pass");
            idmIdentityService.saveUser(user);
        }
    }
}
| apache-2.0 |
ikhemissi/Prebid.js | src/ga.js | 6469 | /**
* ga.js - analytics adapter for google analytics
*/
// Prebid.js event bus, shared helpers and event-name constants.
var events = require('./events');
var utils = require('./utils');
var CONSTANTS = require('./constants.json');
var BID_REQUESTED = CONSTANTS.EVENTS.BID_REQUESTED;
var BID_TIMEOUT = CONSTANTS.EVENTS.BID_TIMEOUT;
var BID_RESPONSE = CONSTANTS.EVENTS.BID_RESPONSE;
var BID_WON = CONSTANTS.EVENTS.BID_WON;

// Module state shared by all the sender functions below.
// NOTE(review): "_disibleInteraction" looks like a typo for
// "_disableInteraction"; renaming would touch every call site in this file,
// so it is only flagged here.
var _disibleInteraction = { nonInteraction : true },
  _analyticsQueue = [],          // GA commands buffered until window[_gaGlobal] exists
  _gaGlobal = null,              // name of the GA global function on window
  _enableCheck = true,           // true until the queue has been flushed once
  _category = 'Prebid.js Bids',  // GA event category for the core events
  //to track how many events we are sending to GA.
  //GA limits the # of events to be sent see here ==> https://developers.google.com/analytics/devguides/collection/ios/v3/limits-quotas?hl=en
  _eventCount = 0,
  //limit data sent by leaving this false
  _enableDistribution = false;
/**
* This will enable sending data to google analytics. Only call once, or duplicate data will be sent!
* @param {object} gaOptions to set distribution and GA global (if renamed);
* @return {[type]} [description]
*/
exports.enableAnalytics = function(gaOptions) {
if(typeof gaOptions.global !== 'undefined'){
_gaGlobal = gaOptions.global;
}
else{
//default global is window.ga
_gaGlobal = 'ga';
}
if(typeof gaOptions.enableDistribution !== 'undefined'){
_enableDistribution = gaOptions.enableDistribution;
}
var bid = null;
//first send all events fired before enableAnalytics called
var existingEvents = events.getEvents();
utils._each(existingEvents, function(eventObj) {
var args = eventObj.args;
if (!eventObj) {
return;
}
if (eventObj.eventType === BID_REQUESTED) {
//bid is 1st args
bid = args[0];
sendBidRequestToGa(bid);
} else if (eventObj.eventType === BID_RESPONSE) {
//bid is 2nd args
bid = args[1];
sendBidResponseToGa(bid);
} else if (eventObj.eventType === BID_TIMEOUT) {
var bidderArray = args[0];
sendTimedOutBiddersToGa(bidderArray);
//todo disable event listeners
} else if (eventObj.eventType === BID_WON) {
bid = args[0];
sendBidWonToGa(bid);
}
});
//Next register event listeners to send data immediately
//bidRequests
events.on(BID_REQUESTED, function(bidRequestObj) {
sendBidRequestToGa(bidRequestObj);
});
//bidResponses
events.on(BID_RESPONSE, function(adunit, bid) {
sendBidResponseToGa(bid);
});
//bidTimeouts
events.on(BID_TIMEOUT, function(bidderArray) {
sendTimedOutBiddersToGa(bidderArray);
});
//wins
events.on(BID_WON, function(bid) {
sendBidWonToGa(bid);
});
};
/**
 * Flushes the queued GA commands once the GA global (window[_gaGlobal])
 * becomes available, then rewires the queue so later commands run
 * immediately instead of being buffered.
 */
function checkAnalytics() {
  if (_enableCheck && typeof window[_gaGlobal] === 'function') {
    // Drain everything queued so far.
    _analyticsQueue.forEach(function (queued) {
      queued.call();
    });
    // From now on, execute each pushed command right away.
    _analyticsQueue.push = function (fn) {
      fn.call();
    };
    // Turn any further flushing into a no-op.
    _enableCheck = false;
  }
  utils.logMessage('event count sent to GA: ' + _eventCount);
}
/**
 * Converts a dollar amount to whole cents, rounding down.
 * Falsy input (undefined, null, 0) yields 0.
 */
function convertToCents(dollars) {
  return dollars ? Math.floor(dollars * 100) : 0;
}
/**
 * Maps a bid load time (ms) onto its GA distribution bucket label.
 * Returns undefined for negative / non-numeric input, matching the
 * original if/else chain.
 */
function getLoadTimeDistribution(time) {
  // Exclusive upper bound of each bucket, paired with its label.
  var buckets = [
    [200, '0-200ms'],
    [300, '200-300ms'],
    [400, '300-400ms'],
    [500, '400-500ms'],
    [600, '500-600ms'],
    [800, '600-800ms'],
    [1000, '800-1000ms'],
    [1200, '1000-1200ms'],
    [1500, '1200-1500ms'],
    [2000, '1500-2000ms']
  ];
  var distribution;
  if (time >= 2000) {
    distribution = '2000ms above';
  } else if (time >= 0) {
    for (var i = 0; i < buckets.length; i++) {
      if (time < buckets[i][0]) {
        distribution = buckets[i][1];
        break;
      }
    }
  }
  return distribution;
}
/**
 * Maps a CPM value (dollars) onto its GA distribution bucket label.
 * Returns undefined for negative / non-numeric input, matching the
 * original if/else chain.
 */
function getCpmDistribution(cpm) {
  // Exclusive upper bound of each bucket, paired with its label.
  var buckets = [
    [0.5, '$0-0.5'],
    [1, '$0.5-1'],
    [1.5, '$1-1.5'],
    [2, '$1.5-2'],
    [2.5, '$2-2.5'],
    [3, '$2.5-3'],
    [4, '$3-4'],
    [6, '$4-6'],
    [8, '$6-8']
  ];
  var distribution;
  if (cpm >= 8) {
    distribution = '$8 above';
  } else if (cpm >= 0) {
    for (var i = 0; i < buckets.length; i++) {
      if (cpm < buckets[i][0]) {
        distribution = buckets[i][1];
        break;
      }
    }
  }
  return distribution;
}
/**
 * Queues one GA "Requests" event for the given bid request, then flushes
 * the queue if GA is already available on the page.
 */
function sendBidRequestToGa(bid) {
  if (!bid || !bid.bidderCode) {
    // Nothing to report, but still give the queue a chance to flush.
    checkAnalytics();
    return;
  }
  _analyticsQueue.push(function () {
    _eventCount++;
    window[_gaGlobal]('send', 'event', _category, 'Requests', bid.bidderCode, 1, _disibleInteraction);
  });
  checkAnalytics();
}
/**
 * Queues GA events for a single bid response: an optional load-time
 * distribution event, and (for non-zero CPMs) a "Bids" event carrying the
 * CPM in cents, a "Bid Load Time" event, and an optional CPM distribution
 * event. Always flushes the queue afterwards.
 */
function sendBidResponseToGa(bid) {
  if (bid && bid.bidder) {
    _analyticsQueue.push(function() {
      var cpmCents = convertToCents(bid.cpm),
        bidder = bid.bidder;
      if (typeof bid.timeToRespond !== 'undefined' && _enableDistribution) {
        _eventCount++;
        var dis = getLoadTimeDistribution(bid.timeToRespond);
        window[_gaGlobal]('send', 'event', 'Prebid.js Load Time Distribution', dis, bidder, 1, _disibleInteraction);
      }
      if (bid.cpm > 0) {
        // The two unconditional sends below count as 2 events; the optional
        // CPM-distribution send increments separately.
        _eventCount = _eventCount + 2;
        var cpmDis = getCpmDistribution(bid.cpm);
        if(_enableDistribution){
          _eventCount++;
          window[_gaGlobal]('send', 'event', 'Prebid.js CPM Distribution', cpmDis, bidder, 1, _disibleInteraction);
        }
        window[_gaGlobal]('send', 'event', _category, 'Bids', bidder, cpmCents, _disibleInteraction);
        window[_gaGlobal]('send', 'event', _category, 'Bid Load Time', bidder, bid.timeToRespond, _disibleInteraction);
      }
    });
  }
  //check the queue
  checkAnalytics();
}
/**
 * Queues one GA "Timeouts" event per timed-out bidder code, then flushes
 * the queue if GA is already available.
 */
function sendTimedOutBiddersToGa(bidderArr) {
  utils._each(bidderArr, function (bidderCode) {
    var report = function () {
      _eventCount++;
      window[_gaGlobal]('send', 'event', _category, 'Timeouts', bidderCode, 1, _disibleInteraction);
    };
    _analyticsQueue.push(report);
  });
  checkAnalytics();
}
/**
 * Queues a GA "Wins" event carrying the winning bid's CPM in cents, then
 * flushes the queue. Guards against a malformed bid object (missing
 * bidderCode) like the sibling sender functions do, so a bad bid cannot
 * throw from inside the queued GA command.
 */
function sendBidWonToGa(bid) {
  if (bid && bid.bidderCode) {
    var cpmCents = convertToCents(bid.cpm);
    _analyticsQueue.push(function () {
      _eventCount++;
      window[_gaGlobal]('send', 'event', _category, 'Wins', bid.bidderCode, cpmCents, _disibleInteraction);
    });
  }
  checkAnalytics();
}
| apache-2.0 |
AllenHom/coolweather | app/src/main/java/com/starnet/root/coolweather/model/StringConverterFactory.java | 730 | package com.starnet.root.coolweather.model;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import okhttp3.ResponseBody;
import retrofit2.Converter;
import retrofit2.Retrofit;
/**
 * Retrofit converter factory that hands plain {@code String} response
 * bodies to {@link StringConverter}, deferring every other type to the
 * next registered factory.
 */
public class StringConverterFactory extends Converter.Factory {

    public static final StringConverterFactory INSTANCE = new StringConverterFactory();

    public static StringConverterFactory create() {
        return INSTANCE;
    }

    @Override
    public Converter<ResponseBody, ?> responseBodyConverter(Type type, Annotation[] annotations, Retrofit retrofit) {
        // Returning null tells Retrofit to try the next converter factory.
        return String.class.equals(type) ? StringConverter.INSTANCE : null;
    }
}
| apache-2.0 |
KRMAssociatesInc/eHMP | lib/mvi/org/hl7/v3/IVLPQ.java | 4096 |
package org.hl7.v3;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElementRef;
import javax.xml.bind.annotation.XmlElementRefs;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for IVL_PQ complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="IVL_PQ">
* <complexContent>
* <extension base="{urn:hl7-org:v3}SXCM_PQ">
* <choice minOccurs="0">
* <sequence>
* <element name="low" type="{urn:hl7-org:v3}IVXB_PQ"/>
* <choice minOccurs="0">
* <element name="width" type="{urn:hl7-org:v3}PQ" minOccurs="0"/>
* <element name="high" type="{urn:hl7-org:v3}IVXB_PQ" minOccurs="0"/>
* </choice>
* </sequence>
* <element name="high" type="{urn:hl7-org:v3}IVXB_PQ"/>
* <sequence>
* <element name="width" type="{urn:hl7-org:v3}PQ"/>
* <element name="high" type="{urn:hl7-org:v3}IVXB_PQ" minOccurs="0"/>
* </sequence>
* <sequence>
* <element name="center" type="{urn:hl7-org:v3}PQ"/>
* <element name="width" type="{urn:hl7-org:v3}PQ" minOccurs="0"/>
* </sequence>
* </choice>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
// JAXB-generated class: keep the structure and annotations exactly as the
// binding compiler emitted them; only comments may be edited.
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "IVL_PQ", propOrder = {
    "rest"
})
@XmlSeeAlso({
    BXITIVLPQ.class
})
public class IVLPQ
    extends SXCMPQ
{

    @XmlElementRefs({
        @XmlElementRef(name = "high", namespace = "urn:hl7-org:v3", type = JAXBElement.class, required = false),
        @XmlElementRef(name = "width", namespace = "urn:hl7-org:v3", type = JAXBElement.class, required = false),
        @XmlElementRef(name = "low", namespace = "urn:hl7-org:v3", type = JAXBElement.class, required = false),
        @XmlElementRef(name = "center", namespace = "urn:hl7-org:v3", type = JAXBElement.class, required = false)
    })
    protected List<JAXBElement<? extends PQ>> rest;

    /**
     * Gets the rest of the content model.
     *
     * <p>
     * You are getting this "catch-all" property because of the following reason:
     * The field name "High" is used by two different parts of a schema. See:
     * line 150 of http://10.4.4.210:8080/MockMVI/psim_webservice/IdMWebService?WSDL&amp;interface=VAIdMPort&amp;part=datatypes.xsd
     * line 143 of http://10.4.4.210:8080/MockMVI/psim_webservice/IdMWebService?WSDL&amp;interface=VAIdMPort&amp;part=datatypes.xsd
     * <p>
     * To get rid of this property, apply a property customization to one
     * of both of the following declarations to change their names:
     * Gets the value of the rest property.
     *
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the rest property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getRest().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link JAXBElement }{@code <}{@link PQ }{@code >}
     * {@link JAXBElement }{@code <}{@link IVXBPQ }{@code >}
     * {@link JAXBElement }{@code <}{@link IVXBPQ }{@code >}
     * {@link JAXBElement }{@code <}{@link PQ }{@code >}
     *
     *
     */
    public List<JAXBElement<? extends PQ>> getRest() {
        // Lazily created so unmarshalling and manual construction share one path.
        if (rest == null) {
            rest = new ArrayList<JAXBElement<? extends PQ>>();
        }
        return this.rest;
    }

}
| apache-2.0 |
yagitoshiro/Study4 | Resources/ui/common/FirstView_3.js | 1499 | //FirstView Component Constructor
function FirstView() {
//create object instance, a parasitic subclass of Observable
var self = Ti.UI.createView();
//label using localization-ready strings from <app dir>/i18n/en/strings.xml
var button = Ti.UI.createButton({
color:'#000000',
title:String.format(L('welcome'),'Titanium'),
height:Ti.UI.SIZE,
width:Ti.UI.SIZE
});
self.add(button);
function success(e){
var image = e.media;
var imageView = Ti.UI.createImageView({image:image, top:0, width:Ti.UI.SIZE, height:Ti.UI.SIZE});
var image_window = Ti.UI.createWindow({backgroundColor:'White'});
imageView.addEventListener('click', function(e){
image_window.close();
});
image_window.add(imageView);
image_window.open();
}
function error(e){
}
function open_gallery(){
Ti.Media.openPhotoGallery({
success: success,
error: error
});
}
function open_camera(){
Ti.Media.showCamera({
success: success,
error: error
});
}
//Add behavior for UI
button.addEventListener('click', function(e) {
var dialog = Ti.UI.createOptionDialog({
options: [L('open_camera'), L('open_gallery'), L('cancel')],
cancel: 2,
title: L('please_select')
});
dialog.addEventListener('click', function(e){
if(e.index == 0){
open_camera();
}else if(e.index == 1){
open_gallery();
}
});
dialog.show();
});
return self;
}
module.exports = FirstView;
| apache-2.0 |
Clinical3PO/Platform | dev/c3po-angularjs-visualization/web/client/components/widgets/widget-server-directive.js | 3832 | 'use strict';
/**
* @ngdoc directive
* @name serverWidget
* @restrict A
* @scope
*
* @description
*
* Adds chartjs line ticker data to widget
*
* @usage
* ```html
 * <widget server-widget>
* ```
*/
angular.module('c-3po')
  .directive('serverWidget', function ($timeout, $interval) {
    return {
      require: 'widget',
      restrict: 'A',
      link: function ($scope) {
        // Chart configuration and data published on the scope:
        // "bandwidth" and "cpu" are live line charts fed once per second;
        // the data24hrs/7days/365days sets are static random demo series.
        $scope.serverCharts = {
          bandwidth: {
            dataLength: 50,   // number of samples kept in the rolling window
            maximum: 40,      // clamp for the random walk in getRandomValue
            data: [[]],
            labels: [],
            options: {
              animation: false,
              showTooltips: false,
              pointDot: false,
              datasetStrokeWidth: 0.5,
              maintainAspectRatio: false,
            },
            colours: ['#4285F4']
          },
          cpu: {
            dataLength: 50,
            maximum: 100,
            data: [[]],
            labels: [],
            options: {
              animation: false,
              showTooltips: false,
              pointDot: false,
              datasetStrokeWidth: 0.5,
              maintainAspectRatio: false,
            },
            colours: ['#DB4437']
          },
          data24hrs: {
            series: ['Bandwidth', 'CPU'],
            labels: ['00:00', '01:00', '02:00', '03:00', '04:00', '05:00', '06:00', '07:00', '08:00', '09:00', '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00', '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
          },
          data7days: {
            series: ['Bandwidth', 'CPU'],
            labels: ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'],
          },
          data365days: {
            series: ['Bandwidth', 'CPU'],
            labels: ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'],
          },
        };
        randomData($scope.serverCharts.data24hrs);
        randomData($scope.serverCharts.data7days);
        randomData($scope.serverCharts.data365days);
        // Push one new random sample into each live chart every second.
        // (The original "25FPS" comment did not match the 1000 ms interval.)
        $interval(function () {
          getLiveChartData($scope.serverCharts.bandwidth);
          getLiveChartData($scope.serverCharts.cpu);
        }, 1000);
        // Drops the oldest sample (once the window is non-empty) and appends
        // random values until the chart holds chart.dataLength points.
        function getLiveChartData (chart) {
          if (chart.data[0].length) {
            chart.labels = chart.labels.slice(1);
            chart.data[0] = chart.data[0].slice(1);
          }
          while (chart.data[0].length < chart.dataLength) {
            chart.labels.push('');
            chart.data[0].push(getRandomValue(chart.data[0], chart.maximum));
          }
        }
        // Fills chart.data with one random series (values 1-100) per entry
        // in chart.series, one value per label.
        function randomData(chart) {
          chart.data = [];
          for(var series = 0; series < chart.series.length; series++) {
            var row = [];
            for(var label = 0; label < chart.labels.length; label++) {
              row.push(Math.floor((Math.random() * 100) + 1));
            }
            chart.data.push(row);
          }
        }
        // Random walk: next value within ±5 of the previous one (50 when the
        // series is empty), clamped to [0, max].
        function getRandomValue (data, max) {
          var l = data.length, previous = l ? data[l - 1] : 50;
          var y = previous + Math.random() * 10 - 5;
          return y < 0 ? 0 : y > max ? max : y;
        }
      }
    };
  });
| apache-2.0 |
glycerine/vj | src/veracity/src/js_tests/longtests__st_portability_06__W9838.js | 3584 | /*
Copyright 2010-2013 SourceGear, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
load("../js_test_lib/vscript_test_lib.js");
//////////////////////////////////////////////////////////////////
// Regression tests: REVERT must refuse to restore a moved directory to its
// original name when another item now occupies that name (test_0: exact-case
// collision on every platform; test_1: case-only collision, a portability
// conflict on case-insensitive filesystems such as Mac/Windows).
function st_portability_06()
{
  load("update_helpers.js");          // load the helper functions
  initialize_update_helpers(this);    // initialize helper functions

  // this.verbose = true; // add this wherever you want it.  It'll stick through the end of the stGroup.

  //////////////////////////////////////////////////////////////////

  this.test_0 = function()
  {
    // Commit AAA, move it under DIR, then create a fresh AAA at the root.
    this.do_fsobj_mkdir("DIR");
    this.do_fsobj_mkdir("AAA");
    vscript_test_wc__addremove();
    this.do_commit("Added AAA");

    vscript_test_wc__move("AAA", "DIR");
    this.do_fsobj_mkdir("AAA");
    vscript_test_wc__addremove();

    var expect_test = new Array;
    expect_test["Moved"] = [ "@/DIR/AAA" ];
    expect_test["Added"] = [ "@/AAA" ];
    vscript_test_wc__confirm_wii(expect_test);

    //////////////////////////////////////////////////////////////////
    // a REVERT on "DIR/AAA"  ("DIR/AAA" ==> "AAA") will
    // collide with the new "AAA" on all platforms.

    print("");
    print("================================================================");
    print("Trying revert-items on DIR/AAA...");
    print("");

    vscript_test_wc__revert_items__expect_error( [ "DIR/AAA" ],
               "An item will interfere with revert" );

    // nothing should have changed on disk
    vscript_test_wc__confirm_wii(expect_test);

    print("");
    print("================================================================");
    print("Trying revert --all...");
    print("");

    vscript_test_wc__revert_all__expect_error( "test",
               "An item will interfere with revert" );

    // nothing should have changed on disk
    vscript_test_wc__confirm_wii(expect_test);
  }

  this.test_1 = function()
  {
    // Same shape as test_0, but the new item differs only in case ("bbb").
    this.do_fsobj_mkdir("BBB");
    vscript_test_wc__addremove();
    this.do_commit("Added BBB");

    vscript_test_wc__move("BBB", "DIR");
    this.do_fsobj_mkdir("bbb");
    vscript_test_wc__addremove();

    var expect_test = new Array;
    expect_test["Moved"] = [ "@/DIR/BBB" ];
    expect_test["Added"] = [ "@/bbb" ];
    vscript_test_wc__confirm_wii(expect_test);

    //////////////////////////////////////////////////////////////////
    // a REVERT on "DIR/BBB"  ("DIR/BBB" ==> "BBB") will
    // collide with the new "bbb" on Mac/Windows.  so we
    // should get a portability warning on all platforms.

    print("");
    print("================================================================");
    print("Trying revert of DIR/BBB...");
    print("");

    vscript_test_wc__revert_items__expect_error( [ "DIR/BBB" ],
               "An item will interfere with revert" );

    // nothing should have changed on disk
    vscript_test_wc__confirm_wii(expect_test);

    print("");
    print("================================================================");
    print("Trying revert --all...");
    print("");

    vscript_test_wc__revert_all__expect_error( "test",
               "Portability conflict" );

    // nothing should have changed on disk
    vscript_test_wc__confirm_wii(expect_test);
  }
}
| apache-2.0 |
yurloc/assertj-core | src/test/java/org/assertj/core/data/MapEntry_toString_Test.java | 1203 | /*
* Created on Dec 21, 2010
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright @2010-2012 the original author or authors.
*/
package org.assertj.core.data;
import static junit.framework.Assert.assertEquals;
import static org.assertj.core.data.MapEntry.entry;
import org.assertj.core.data.MapEntry;
import org.junit.*;
/**
* Tests for <{@link MapEntry#toString()}.
*
* @author Alex Ruiz
*/
public class MapEntry_toString_Test {
private static MapEntry entry;
@BeforeClass
public static void setUpOnce() {
entry = entry("name", "Yoda");
}
@Test
public void should_implement_toString() {
assertEquals("MapEntry[key='name', value='Yoda']", entry.toString());
}
}
| apache-2.0 |
bochackathon-fintech/Heroes-Lab | app/Conversations/ForeignExchangeRate.php | 1974 | <?php
/**
* Created by PhpStorm.
* User: DarkP
* Date: 6/10/2017
* Time: 12:25 PM
*/
namespace app\Conversations;
class ForeignExchangeRate
{
    /** Currencies filtered out of the reply to keep the message short. */
    protected static $unusedRates = [
        'TRY',
        'THB',
        'CAD',
        'CZK',
        'DKK',
        'KRW',
        'SGD',
        'ZAR',
        'NOK',
        'INR',
        'ILS',
        'PHP',
        'RON',
        'RUB',
        'BGN',
        'HKD',
        'MXN',
        'IDR',
        'BRL'
    ];

    /**
     * Entry point: fetch and format the rates for the given base currency.
     *
     * @param string $currency base currency code (e.g. 'EUR')
     * @return string formatted rate list, or an apology message
     */
    public static function run(string $currency)
    {
        return self::getRates($currency);
    }

    /**
     * Collect the rates from the JSON api.
     *
     * @param string $base base currency code
     * @return string
     */
    public static function getRates($base)
    {
        // urlencode() keeps a user-supplied base from injecting extra query
        // parameters; the original also passed use_include_path=true, which
        // is meaningless for URLs and has been dropped.
        $ratesJsonData = file_get_contents('http://api.fixer.io/latest?base=' . urlencode($base));
        if (!$ratesJsonData) {
            return 'Sorry I don`t know this rate base. Try EUR, USD, CHF...';
        }
        $ratesData = json_decode($ratesJsonData);
        $rates = self::removeUnusedRates((array)$ratesData->rates);
        return self::formatRates($ratesData->base, $ratesData->date, $rates);
    }

    /**
     * Format the rates for the message.
     *
     * @param string $ratesBase base currency code from the API response
     * @param string $ratesDate quote date from the API response
     * @param array  $rates     currency code => rate
     * @return string
     */
    private static function formatRates(string $ratesBase, string $ratesDate, array $rates)
    {
        // Bug fix: '\r\n' in SINGLE quotes emits the four literal characters
        // backslash-r-backslash-n; double quotes are required for real line
        // breaks. The mixed '\n\r' ordering in the loop is unified as well.
        $returnMessage = '💰 Your rates based on ' . $ratesBase . ":\r\n" . 'Date: ' . $ratesDate . "\r\n";
        foreach ($rates as $key => $rate) {
            $returnMessage .= $key . ' ' . $rate . "\r\n";
        }
        return $returnMessage;
    }

    /**
     * Remove some unused rates.
     *
     * @param array $rates currency code => rate
     * @return array filtered copy
     */
    private static function removeUnusedRates(array $rates)
    {
        return array_filter($rates, function ($key) {
            // Strict comparison: keys and the blacklist are both strings.
            return !in_array($key, self::$unusedRates, true);
        }, ARRAY_FILTER_USE_KEY);
    }
} | apache-2.0 |
prjm/paspaspas | PasPasPas.Typings/src/Common/Scope.cs | 3439 | #nullable disable
using System;
using System.Collections.Generic;
using PasPasPas.Globals.Types;
using PasPasPas.Infrastructure.Utils;
namespace PasPasPas.Typings.Common {
/// <summary>
/// scope for identifier visibility
/// </summary>
public class Scope {
private readonly IOrderedDictionary<string, ITypeSymbol> entries
= new OrderedDictionary<string, ITypeSymbol>(StringComparer.OrdinalIgnoreCase);
private readonly Scope parent;
private readonly ITypeRegistry typeRegistry;
/// <summary>
/// defined types
/// </summary>
public ITypeRegistry TypeRegistry
=> typeRegistry;
/// <summary>
/// create a new scope
/// </summary>
public Scope(ITypeRegistry types) {
typeRegistry = types;
parent = null;
}
/// <summary>
/// create a new scope
/// </summary>
/// <param name="parentScope">parent scope</param>
public Scope(Scope parentScope) {
parent = parentScope;
typeRegistry = parentScope.typeRegistry;
}
/// <summary>
/// gets the root of the scope
/// </summary>
public Scope Root {
get {
var result = this;
while (result.parent != null)
result = result.parent;
return result;
}
}
/// <summary>
/// get the parent scope
/// </summary>
public Scope Parent
=> parent;
/// <summary>
/// list of entries
/// </summary>
public IEnumerable<KeyValuePair<string, ITypeSymbol>> AllEntriesInOrder {
get {
var scope = this;
while (scope != default) {
foreach (var entry in scope.entries)
yield return entry;
scope = scope.Parent;
}
}
}
/// <summary>
/// open a new child scope
/// </summary>
/// <returns>new child scope</returns>
public Scope Open()
=> new Scope(this);
/// <summary>
/// close this scope
/// </summary>
/// <returns>parent scope</returns>
public Scope Close() {
if (parent == null)
throw new InvalidOperationException();
return parent;
}
/// <summary>
/// try to resolve a name
/// </summary>
/// <param name="name"></param>
/// <param name="reference"></param>
/// <returns></returns>
public bool TryToResolve(string name, out ITypeSymbol reference) {
var result = entries.TryGetValue(name, out reference);
if (!result && Parent != default)
return Parent.TryToResolve(name, out reference);
return result;
}
/// <summary>
/// a a new entry to this scope
/// </summary>
/// <param name="name"></param>
/// <param name="scopeEntry">scope entry</param>
public void AddEntry(string name, ITypeSymbol scopeEntry)
=> entries[name] = scopeEntry;
}
}
| apache-2.0 |
plus1s/shadowsocks-py-mu | shadowsocks/tcprelay.py | 33893 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
# Copyright 2016 Howard Liu
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import time
import socket
import errno
import struct
import logging
import random
from shadowsocks import cryptor, eventloop, shell, common
from shadowsocks.common import parse_header, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ONETIMEAUTH_CHUNK_BYTES, \
ONETIMEAUTH_CHUNK_DATA_LEN, ADDRTYPE_AUTH, U, D
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512

# flag passed to sendto() for TCP Fast Open connects
MSG_FASTOPEN = 0x20000000

# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3

# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection

# for each handler, we have 2 sockets:
#    local:   connected to the client
#    remote:  connected to remote server

# for each handler, it could be at one of several stages:

# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote

# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote

STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1

# for each handler, we have 2 stream directions:
#    upstream:    from client to server direction
#                 read local and write to remote
#    downstream:  from server to client direction
#                 read remote and write to local

STREAM_UP = 0
STREAM_DOWN = 1

# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING

# maximum number of bytes read from a socket per recv() call
BUF_SIZE = 32 * 1024

# helper exceptions for TCPRelayHandler


class BadSocksHeader(Exception):
    """Raised when an incoming SOCKS request header cannot be parsed."""
    pass


class NoAcceptableMethods(Exception):
    """Raised when the SOCKS hello offers no authentication method we accept."""
    pass
class TCPRelayHandler(object):
    def __init__(self, server, fd_to_handlers, loop, local_sock, config,
                 dns_resolver, is_local):
        """Set up per-connection state and register the accepted client
        socket with the event loop.

        :param server: owning TCPRelay (receives activity updates)
        :param fd_to_handlers: shared fd -> handler map; this handler
            registers itself under the local socket's fd
        :param loop: event loop to poll the sockets on
        :param local_sock: the just-accepted client connection
        :param config: server configuration dict (password, method, ...)
        :param dns_resolver: async resolver used later for the remote host
        :param is_local: True when running as sslocal, False as ssserver
        """
        self._server = server
        self._fd_to_handlers = fd_to_handlers
        self._loop = loop
        self._local_sock = local_sock
        self._remote_sock = None
        self._config = config
        self._dns_resolver = dns_resolver

        # TCP Relay works as either sslocal or ssserver
        # if is_local, this is sslocal
        self._is_local = is_local
        self._stage = STAGE_INIT
        # per-connection cipher; key/method come from the config
        self._cryptor = cryptor.Cryptor(config['password'],
                                        config['method'])
        # one-time-auth state (header flag may still enable it per session)
        self._ota_enable = config.get('one_time_auth', False)
        self._ota_enable_session = self._ota_enable
        self._ota_buff_head = b''
        self._ota_buff_data = b''
        self._ota_len = 0
        self._ota_chunk_idx = 0
        self._fastopen_connected = False
        # pending writes, buffered until the target socket is writable
        self._data_to_write_to_local = []
        self._data_to_write_to_remote = []
        self._upstream_status = WAIT_STATUS_READING
        self._downstream_status = WAIT_STATUS_INIT
        self._client_address = local_sock.getpeername()[:2]
        self._remote_address = None
        self._forbidden_iplist = config.get('forbidden_ip')
        if is_local:
            self._chosen_server = self._get_a_server()
        fd_to_handlers[local_sock.fileno()] = self
        local_sock.setblocking(False)
        # disable Nagle: relay traffic is latency-sensitive
        local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
                 self._server)
        self.last_activity = 0
        self._update_activity()
    def __hash__(self):
        """Hash by identity.

        The default CPython __hash__ is roughly id() / 16; using the raw
        id() avoids those collisions when handlers are stored in sets/dicts.
        """
        return id(self)
    @property
    def remote_address(self):
        # (host, port) of the destination; None until the address header
        # has been parsed in _handle_stage_addr.
        return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
    def _update_activity(self, direction=D, data_len=0):
        """Report recent activity (and optionally transferred bytes) to the
        owning TCPRelay.

        Without this the relay would consider the connection inactive and
        time it out.
        """
        self._server.update_activity(self, direction, data_len)
    def _update_stream(self, stream, status):
        """Move one stream (STREAM_UP / STREAM_DOWN) to a new wait status
        and re-register both sockets' poll events accordingly.

        The poll masks are only rebuilt when the status actually changed
        ("dirty") to avoid needless loop.modify() calls.
        """
        # check if status is changed
        # only update if dirty
        dirty = False
        if stream == STREAM_DOWN:
            if self._downstream_status != status:
                self._downstream_status = status
                dirty = True
        elif stream == STREAM_UP:
            if self._upstream_status != status:
                self._upstream_status = status
                dirty = True
        if not dirty:
            return

        # local socket: downstream writes to it, upstream reads from it
        if self._local_sock:
            event = eventloop.POLL_ERR
            if self._downstream_status & WAIT_STATUS_WRITING:
                event |= eventloop.POLL_OUT
            if self._upstream_status & WAIT_STATUS_READING:
                event |= eventloop.POLL_IN
            self._loop.modify(self._local_sock, event)
        # remote socket: mirror image of the local one
        if self._remote_sock:
            event = eventloop.POLL_ERR
            if self._downstream_status & WAIT_STATUS_READING:
                event |= eventloop.POLL_IN
            if self._upstream_status & WAIT_STATUS_WRITING:
                event |= eventloop.POLL_OUT
            self._loop.modify(self._remote_sock, event)
    def _write_to_sock(self, data, sock):
        """Write ``data`` to ``sock`` (local or remote side).

        If the kernel accepts only part of the data, the remainder is
        buffered and the corresponding stream switches to "waiting for
        writable". Returns True on (possibly partial) success, False when
        the handler was destroyed due to a hard socket error.
        """
        if not data or not sock:
            return False
        uncomplete = False
        try:
            l = len(data)
            s = sock.send(data)
            if s < l:
                # short write: keep the unsent tail for later
                data = data[s:]
                uncomplete = True
        except (socket.error, OSError, IOError) as e:
            error_no = eventloop.errno_from_exception(e)
            # EAGAIN-style errors just mean "try again when writable";
            # Windows additionally reports WSAEWOULDBLOCK
            if sys.platform == "win32":
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
                    uncomplete = True
            elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
                              errno.EWOULDBLOCK):
                uncomplete = True
            else:
                shell.print_exception(e)
                self.destroy()
                return False
        if uncomplete:
            # buffer the remainder and wait for the socket to be writable
            if sock == self._local_sock:
                self._data_to_write_to_local.append(data)
                self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
            elif sock == self._remote_sock:
                self._data_to_write_to_remote.append(data)
                self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
            else:
                logging.error('write_all_to_sock:unknown socket')
        else:
            # everything went out; go back to waiting for more input
            if sock == self._local_sock:
                self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
            elif sock == self._remote_sock:
                self._update_stream(STREAM_UP, WAIT_STATUS_READING)
            else:
                logging.error('write_all_to_sock:unknown socket')
        return True
    def _handle_stage_connecting(self, data):
        """Buffer data that arrives while the remote connection is still
        being established.

        On the server side the data is plain payload; on the local side it
        is OTA-wrapped (if enabled) and encrypted first. When TCP Fast Open
        is configured on sslocal, the first buffered chunk doubles as the
        connect trigger via sendto(..., MSG_FASTOPEN).
        """
        if not self._is_local:
            if self._ota_enable_session:
                self._ota_chunk_data(data,
                                     self._data_to_write_to_remote.append)
            else:
                self._data_to_write_to_remote.append(data)
            return
        if self._ota_enable_session:
            data = self._ota_chunk_data_gen(data)
        data = self._cryptor.encrypt(data)
        self._data_to_write_to_remote.append(data)

        if self._config['fast_open'] and not self._fastopen_connected:
            # for sslocal and fastopen, we basically wait for data and use
            # sendto to connect
            try:
                # only connect once
                self._fastopen_connected = True
                remote_sock = \
                    self._create_remote_socket(self._chosen_server[0],
                                               self._chosen_server[1])
                self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
                data = b''.join(self._data_to_write_to_remote)
                l = len(data)
                # TFO: carries the payload inside the SYN
                s = remote_sock.sendto(data, MSG_FASTOPEN,
                                       self._chosen_server)
                if s < l:
                    # short write: keep the unsent tail buffered
                    data = data[s:]
                    self._data_to_write_to_remote = [data]
                else:
                    self._data_to_write_to_remote = []
                self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
            except (socket.error, OSError, IOError) as e:
                if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
                    # in this case data is not sent at all
                    self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
                elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
                    self._fastopen_connected = False
                    logging.error('fast open not supported on this OS')
                    self._config['fast_open'] = False
                    self.destroy()
                else:
                    shell.print_exception(e)
                    self.destroy()
    @shell.exception_handle(self_=True, destroy=True, conn_err=True)
    def _handle_stage_addr(self, data):
        """Parse the destination address from the first payload.

        On sslocal this also answers the SOCKS5 request (CONNECT /
        UDP ASSOCIATE); on the server it applies the optional port
        firewall before kicking off DNS resolution of the target.
        """
        addr, port = self._client_address
        if self._is_local:
            cmd = common.ord(data[1])
            if cmd == CMD_UDP_ASSOCIATE:
                logging.debug('U[%d] UDP associate' %
                              self._config['server_port'])
                if self._local_sock.family == socket.AF_INET6:
                    header = b'\x05\x00\x00\x04'
                else:
                    header = b'\x05\x00\x00\x01'
                addr_to_send = socket.inet_pton(self._local_sock.family,
                                                addr)
                port_to_send = struct.pack('>H', port)
                self._write_to_sock(header + addr_to_send + port_to_send,
                                    self._local_sock)
                self._stage = STAGE_UDP_ASSOC
                # just wait for the client to disconnect
                return
            elif cmd == CMD_CONNECT:
                # just trim VER CMD RSV
                data = data[3:]
            else:
                logging.error('U[%d] Unknown command %d',
                              self._config['server_port'], cmd)
                self.destroy()
                return
        header_result = parse_header(data)
        if header_result is None:
            raise Exception('TCP can not parse header')
        addrtype, remote_addr, remote_port, header_length = header_result
        # optional destination-port firewall (server side config); trusted
        # server ports bypass it entirely
        if self._config['firewall_ports'] and self._config['server_port'] not in self._config['firewall_trusted']:
            # Firewall enabled
            if self._config['firewall_mode'] == 'blacklist' and remote_port in self._config['firewall_ports']:
                firewall_blocked = True
            elif self._config['firewall_mode'] == 'whitelist' and remote_port not in self._config['firewall_ports']:
                firewall_blocked = True
            else:
                firewall_blocked = False
        else:
            firewall_blocked = False
        if firewall_blocked:
            logging.warning('U[%d] TCP PORT BANNED: RP[%d] A[%s-->%s]' % (
                self._config['server_port'], remote_port,
                addr, common.to_str(remote_addr)
            ))
            return
        else:
            logging.info('U[%d] TCP CONN: RP[%d] A[%s-->%s]' % (
                self._config['server_port'], remote_port,
                addr, common.to_str(remote_addr)
            ))
        self._remote_address = (common.to_str(remote_addr), remote_port)
        # pause reading
        self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
        self._stage = STAGE_DNS
        if self._is_local:
            # forward address to remote
            self._write_to_sock((b'\x05\x00\x00\x01'
                                 b'\x00\x00\x00\x00\x10\x10'),
                                self._local_sock)
            data_to_send = self._cryptor.encrypt(data)
            self._data_to_write_to_remote.append(data_to_send)
            # notice here may go into _handle_dns_resolved directly
            self._dns_resolver.resolve(self._chosen_server[0],
                                       self._handle_dns_resolved)
        else:
            if self._ota_enable_session:
                # strip the address header; remainder is the first OTA chunk
                data = data[header_length:]
                self._ota_chunk_data(data,
                                     self._data_to_write_to_remote.append)
            elif len(data) > header_length:
                self._data_to_write_to_remote.append(data[header_length:])
            # notice here may go into _handle_dns_resolved directly
            self._dns_resolver.resolve(remote_addr,
                                       self._handle_dns_resolved)
    def _create_remote_socket(self, ip, port):
        """Create a non-blocking TCP socket to (ip, port) and register it
        as this handler's remote socket.

        Raises if getaddrinfo yields nothing or the resolved IP is in the
        forbidden list.  The socket is NOT connected here.
        """
        addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
                                   socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("TCP getaddrinfo failed for %s:%d" % (ip, port))
        af, socktype, proto, canonname, sa = addrs[0]
        if self._forbidden_iplist:
            if common.to_str(sa[0]) in self._forbidden_iplist:
                raise Exception('U[%d] IP %s is in forbidden list, rejected' %
                                (self._config['server_port'], common.to_str(sa[0])))
        remote_sock = socket.socket(af, socktype, proto)
        self._remote_sock = remote_sock
        # map fd back to this handler so the event loop can dispatch to us
        self._fd_to_handlers[remote_sock.fileno()] = self
        remote_sock.setblocking(False)
        remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        return remote_sock
    def _handle_dns_resolved(self, result, error):
        """DNS callback: connect to the resolved IP (or defer for TFO).

        result is expected as (hostname, ip); any error destroys the
        handler.
        """
        if error:
            logging.error(error)
            self.destroy()
            return
        if result and result[1]:
            ip = result[1]
            try:
                self._stage = STAGE_CONNECTING
                remote_addr = ip
                if self._is_local:
                    remote_port = self._chosen_server[1]
                else:
                    remote_port = self._remote_address[1]
                if self._is_local and self._config['fast_open']:
                    # for fastopen:
                    # wait for more data arrive and send them in one SYN
                    self._stage = STAGE_CONNECTING
                    # we don't have to wait for remote since it's not
                    # created
                    self._update_stream(STREAM_UP, WAIT_STATUS_READING)
                    # TODO when there is already data in this packet
                else:
                    # else do connect
                    remote_sock = self._create_remote_socket(remote_addr,
                                                             remote_port)
                    try:
                        remote_sock.connect((remote_addr, remote_port))
                    except (socket.error, OSError, IOError) as e:
                        # EINPROGRESS is the normal non-blocking connect path
                        if eventloop.errno_from_exception(e) == \
                                errno.EINPROGRESS:
                            pass
                    self._loop.add(remote_sock,
                                   eventloop.POLL_ERR | eventloop.POLL_OUT,
                                   self._server)
                    self._stage = STAGE_CONNECTING
                    self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
                    self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
                return
            except Exception as e:
                shell.print_exception(e)
                self.destroy()
    def _write_to_sock_remote(self, data):
        # Bound callback: forward data to the remote socket (used as the
        # data_cb for OTA unchunking).
        self._write_to_sock(data, self._remote_sock)
def _ota_chunk_data(self, data, data_cb):
# spec https://shadowsocks.org/en/spec/one-time-auth.html
unchunk_data = b''
while len(data) > 0:
if self._ota_len == 0:
# get DATA.LEN + HMAC-SHA1
length = ONETIMEAUTH_CHUNK_BYTES - len(self._ota_buff_head)
self._ota_buff_head += data[:length]
data = data[length:]
if len(self._ota_buff_head) < ONETIMEAUTH_CHUNK_BYTES:
# wait more data
return
data_len = self._ota_buff_head[:ONETIMEAUTH_CHUNK_DATA_LEN]
self._ota_len = struct.unpack('>H', data_len)[0]
length = min(self._ota_len - len(self._ota_buff_data), len(data))
self._ota_buff_data += data[:length]
data = data[length:]
if len(self._ota_buff_data) == self._ota_len:
# get a chunk data
_hash = self._ota_buff_head[ONETIMEAUTH_CHUNK_DATA_LEN:]
_data = self._ota_buff_data
index = struct.pack('>I', self._ota_chunk_idx)
key = self._cryptor.decipher_iv + index
if onetimeauth_verify(_hash, _data, key) is False:
logging.warn('U[%d] TCP One time auth fail, chunk is dropped!' % self._config[
'server_port'])
else:
unchunk_data += _data
self._ota_chunk_idx += 1
self._ota_buff_head = b''
self._ota_buff_data = b''
self._ota_len = 0
data_cb(unchunk_data)
return
def _ota_chunk_data_gen(self, data):
data_len = struct.pack(">H", len(data))
index = struct.pack('>I', self._ota_chunk_idx)
key = self._cryptor.cipher_iv + index
sha110 = onetimeauth_gen(data, key)
self._ota_chunk_idx += 1
return data_len + sha110 + data
def _handle_stage_stream(self, data):
if self._is_local:
if self._ota_enable_session:
data = self._ota_chunk_data_gen(data)
data = self._cryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
else:
if self._ota_enable_session:
self._ota_chunk_data(data, self._write_to_sock_remote)
else:
self._write_to_sock(data, self._remote_sock)
return
    def _on_local_read(self):
        # handle all local read events and dispatch them to methods for
        # each stage
        if not self._local_sock:
            return
        is_local = self._is_local
        data = None
        try:
            data = self._local_sock.recv(BUF_SIZE)
        except (socket.error, OSError, IOError) as e:
            error_no = eventloop.errno_from_exception(e)
            # would-block errors are expected on non-blocking sockets
            if sys.platform == "win32":
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
                    return
            elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
                              errno.EWOULDBLOCK):
                return
        if not data:
            # empty read == peer closed the connection
            self.destroy()
            return
        self._update_activity(U, len(data))
        if not is_local:
            # server side receives ciphertext from the client
            data = self._cryptor.decrypt(data)
            if not data:
                return
        if self._stage == STAGE_STREAM:
            self._handle_stage_stream(data)
            return
        elif is_local and self._stage == STAGE_INIT:
            # TODO check auth method
            self._write_to_sock(b'\x05\00', self._local_sock)
            self._stage = STAGE_ADDR
            return
        elif self._stage == STAGE_CONNECTING:
            self._handle_stage_connecting(data)
        elif (is_local and self._stage == STAGE_ADDR) or \
                (not is_local and self._stage == STAGE_INIT):
            self._handle_stage_addr(data)
    def _on_remote_read(self):
        # handle all remote read events
        data = None
        try:
            data = self._remote_sock.recv(BUF_SIZE)
        except socket.error as err:
            error_no = err.args[0]
            # would-block errors are expected on non-blocking sockets
            if sys.platform == "win32":
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
                    return
            elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
                              errno.EWOULDBLOCK):
                return
        if not data:
            # empty read == remote closed the connection
            self.destroy()
            return
        self._update_activity(D, len(data))
        # local decrypts server responses; server encrypts them for the
        # client
        if self._is_local:
            data = self._cryptor.decrypt(data)
        else:
            data = self._cryptor.encrypt(data)
        try:
            self._write_to_sock(data, self._local_sock)
        except Exception as e:
            shell.print_exception(e)
            # TODO use logging when debug completed
            self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
    def _on_local_error(self):
        # POLL_ERR on the local socket: log the socket error and tear down
        logging.debug('got local error')
        if self._local_sock:
            logging.error('U[%d] %s' % (self._config['server_port'], eventloop.get_sock_error(self._local_sock)))
        self.destroy()
    def _on_remote_error(self):
        # POLL_ERR on the remote socket: log the socket error and tear down
        logging.debug('got remote error')
        if self._remote_sock:
            logging.error('U[%d] %s' % (self._config['server_port'], eventloop.get_sock_error(self._remote_sock)))
        self.destroy()
    @shell.exception_handle(self_=True, destroy=True)
    def handle_event(self, sock, event):
        # handle all events in this handler and dispatch them to methods
        if self._stage == STAGE_DESTROYED:
            logging.debug('ignore handle_event: destroyed')
            return
        # order is important: errors first, then reads, then writes; each
        # step may destroy the handler, so re-check the stage between them
        if sock == self._remote_sock:
            if event & eventloop.POLL_ERR:
                self._on_remote_error()
            if self._stage == STAGE_DESTROYED:
                return
            if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
                self._on_remote_read()
            if self._stage == STAGE_DESTROYED:
                return
            if event & eventloop.POLL_OUT:
                self._on_remote_write()
        elif sock == self._local_sock:
            if event & eventloop.POLL_ERR:
                self._on_local_error()
            if self._stage == STAGE_DESTROYED:
                return
            if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
                self._on_local_read()
            if self._stage == STAGE_DESTROYED:
                return
            if event & eventloop.POLL_OUT:
                self._on_local_write()
        else:
            logging.warn('unknown socket')
    def _log_error(self, e):
        # Log an exception together with the client peer address when the
        # local socket is still available.
        if self._local_sock:
            addr, port = self._local_sock.getpeername()[:2]
            logging.error('U[%d] %s when handling connection from %s:%d' %
                          (self._config['server_port'], e, addr, port))
        else:
            logging.error('U[%d] Unknown TCP error occurred' % self._config['server_port'])
    def destroy(self):
        # destroy the handler and release any resources
        # promises:
        # 1. destroy won't make another destroy() call inside
        # 2. destroy releases resources so it prevents future call to destroy
        # 3. destroy won't raise any exceptions
        # if any of the promises are broken, it indicates a bug has been
        # introduced! mostly likely memory leaks, etc
        if self._stage == STAGE_DESTROYED:
            # this couldn't happen
            logging.debug('already destroyed')
            return
        # mark destroyed first so re-entrant calls bail out above
        self._stage = STAGE_DESTROYED
        if self._remote_address:
            logging.debug('destroy: %s:%d' %
                          self._remote_address)
        else:
            logging.debug('destroy')
        if self._remote_sock:
            logging.debug('destroying remote')
            self._loop.remove(self._remote_sock)
            del self._fd_to_handlers[self._remote_sock.fileno()]
            self._remote_sock.close()
            self._remote_sock = None
        if self._local_sock:
            logging.debug('destroying local')
            self._loop.remove(self._local_sock)
            del self._fd_to_handlers[self._local_sock.fileno()]
            self._local_sock.close()
            self._local_sock = None
        # drop pending DNS callback and deregister from the relay's
        # timeout bookkeeping
        self._dns_resolver.remove_callback(self._handle_dns_resolved)
        self._server.remove_handler(self)
class TCPRelay(object):
    """Listens on a TCP port and spawns one TCPRelayHandler per accepted
    connection.  Also owns the idle-timeout bookkeeping for its handlers
    via an append-only queue sorted by last activity.
    """

    def __init__(self, config, dns_resolver, is_local, stat_callback=None):
        self._config = config
        self._is_local = is_local
        self._dns_resolver = dns_resolver
        self._closed = False
        self._eventloop = None
        self._fd_to_handlers = {}
        self._timeout = config['timeout']
        self._timeouts = []  # a list for all the handlers
        # we trim the timeouts once a while
        self._timeout_offset = 0  # last checked position for timeout
        self._handler_to_timeouts = {}  # key: handler value: index in timeouts
        if is_local:
            listen_addr = config['local_address']
            listen_port = config['local_port']
        else:
            listen_addr = config['server']
            listen_port = config['server_port']
        self._listen_port = listen_port
        addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
                                   socket.SOCK_STREAM, socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("TCP getaddrinfo failed for %s:%d" %
                            (listen_addr, listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind(sa)
        server_socket.setblocking(False)
        if self._config['fast_open']:
            try:
                # 23 is TCP_FASTOPEN on Linux (constant not exposed by all
                # Python versions); 5 is the pending-TFO-request queue size
                server_socket.setsockopt(socket.SOL_TCP, 23, 5)
            except socket.error:
                logging.warning(
                    'Fast open is not available, automatically turned off')
                self._config['fast_open'] = False
        server_socket.listen(1024)
        self._server_socket = server_socket
        self._stat_callback = stat_callback

    def add_to_loop(self, loop):
        """Register the listening socket and the periodic sweep with the
        event loop.  May only be called once."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop
        self._eventloop.add(self._server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR, self)
        self._eventloop.add_periodic(self.handle_periodic)

    def remove_handler(self, handler):
        """Forget a handler's timeout slot (called from handler.destroy)."""
        index = self._handler_to_timeouts.get(hash(handler), -1)
        if index >= 0:
            # delete is O(n), so we just set it to None
            self._timeouts[index] = None
            del self._handler_to_timeouts[hash(handler)]

    def update_activity(self, handler, direction, data_len):
        """Record traffic for stats and refresh the handler's position in
        the timeout queue."""
        if data_len and self._stat_callback:
            self._stat_callback(self._listen_port, direction, data_len)
        # set handler to active
        now = int(time.time())
        if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
            # thus we can lower timeout modification frequency
            return
        handler.last_activity = now
        index = self._handler_to_timeouts.get(hash(handler), -1)
        if index >= 0:
            # delete is O(n), so we just set it to None
            self._timeouts[index] = None
        # re-append at the tail, keeping the queue sorted by last activity
        length = len(self._timeouts)
        self._timeouts.append(handler)
        self._handler_to_timeouts[hash(handler)] = length

    def _sweep_timeout(self):
        # tornado's timeout memory management is more flexible than we need
        # we just need a sorted last_activity queue and it's faster than heapq
        # in fact we can do O(1) insertion/remove so we invent our own
        if self._timeouts:
            logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
            now = time.time()
            length = len(self._timeouts)
            pos = self._timeout_offset
            while pos < length:
                handler = self._timeouts[pos]
                if handler:
                    if now - handler.last_activity < self._timeout:
                        # queue is sorted by last_activity, so everything
                        # after this one is still fresh
                        break
                    else:
                        if handler.remote_address:
                            logging.warn('timed out: %s:%d' %
                                         handler.remote_address)
                        else:
                            logging.warn('timed out')
                        handler.destroy()
                        self._timeouts[pos] = None  # free memory
                        pos += 1
                else:
                    pos += 1
            if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # clean up the timeout queue when it gets larger than half
                # of the queue
                self._timeouts = self._timeouts[pos:]
                for key in self._handler_to_timeouts:
                    self._handler_to_timeouts[key] -= pos
                pos = 0
            self._timeout_offset = pos

    def handle_event(self, sock, fd, event):
        # handle events and dispatch to handlers
        if sock:
            logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
                        eventloop.EVENT_NAMES.get(event, event))
        if sock == self._server_socket:
            # event on the listening socket: accept a new connection
            if event & eventloop.POLL_ERR:
                # TODO
                raise Exception('server_socket error')
            try:
                logging.debug('accept')
                conn = self._server_socket.accept()
                TCPRelayHandler(self, self._fd_to_handlers,
                                self._eventloop, conn[0], self._config,
                                self._dns_resolver, self._is_local)
            except socket.error as err:
                error_no = err.args[0]
                # spurious accept wake-ups are not errors
                if sys.platform == "win32":
                    if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                    errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
                        return
                elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                  errno.EWOULDBLOCK):
                    return
                else:
                    shell.print_exception(err)
        else:
            # event on a relayed connection: dispatch to its handler
            if sock:
                handler = self._fd_to_handlers.get(fd, None)
                if handler:
                    handler.handle_event(sock, event)
                else:
                    logging.warn('poll removed fd')

    def handle_periodic(self):
        """Periodic tick: finish shutdown when closed, then sweep idle
        handlers."""
        if self._closed:
            if self._server_socket:
                self._eventloop.remove(self._server_socket)
                self._server_socket.close()
                self._server_socket = None
                logging.info('TCP port %d closed', self._listen_port)
            if not self._fd_to_handlers:
                logging.info('stopping')
                self._eventloop.stop()
        self._sweep_timeout()

    def close(self, next_tick=False):
        """Stop accepting connections.  With next_tick=False this also
        tears down immediately; otherwise handle_periodic finishes the
        shutdown on the next tick."""
        logging.debug('TCP close')
        self._closed = True
        if not next_tick:
            if self._eventloop:
                self._eventloop.remove_periodic(self.handle_periodic)
                self._eventloop.remove(self._server_socket)
            self._server_socket.close()
            for handler in list(self._fd_to_handlers.values()):
                handler.destroy()
| apache-2.0 |
dahlstrom-g/intellij-community | plugins/textmate/src/org/jetbrains/plugins/textmate/editor/TextMateBackspaceHandler.java | 1942 | package org.jetbrains.plugins.textmate.editor;
import com.intellij.codeInsight.editorActions.BackspaceHandlerDelegate;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.ex.EditorEx;
import com.intellij.openapi.editor.highlighter.EditorHighlighter;
import com.intellij.openapi.editor.highlighter.HighlighterIterator;
import com.intellij.psi.PsiFile;
import com.intellij.psi.tree.IElementType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.plugins.textmate.TextMateFileType;
import org.jetbrains.plugins.textmate.language.preferences.TextMateBracePair;
/**
 * Smart-backspace support for TextMate-backed files: when the user deletes
 * an opening brace/quote of a known smart-typing pair, the matching closing
 * character immediately after the caret is deleted as well.
 */
public class TextMateBackspaceHandler extends BackspaceHandlerDelegate {
	@Override
	public void beforeCharDeleted(char c, @NotNull PsiFile file, @NotNull Editor editor) {
		// No preprocessing needed; all work happens in charDeleted().
	}

	@Override
	public boolean charDeleted(char c, PsiFile file, @NotNull Editor editor) {
		// Only act on files handled by the TextMate plugin.
		if (file.getFileType() == TextMateFileType.INSTANCE) {
			final int offset = editor.getCaretModel().getOffset();
			EditorHighlighter highlighter = ((EditorEx)editor).getHighlighter();
			HighlighterIterator iterator = highlighter.createIterator(offset);
			// NOTE(review): atEnd() is only consulted when offset == 0 —
			// presumably guarding an empty document; confirm intent.
			if (offset == 0 && iterator.atEnd()) {
				return false;
			}
			final IElementType tokenType = iterator.getTokenType();
			if (tokenType != null) {
				// Token type string doubles as the TextMate scope selector.
				String scopeSelector = tokenType.toString();
				final TextMateBracePair pairForChar = TextMateEditorUtils.getSmartTypingPairForLeftChar(c, scopeSelector);
				if (pairForChar != null) {
					final Document document = editor.getDocument();
					if (document.getTextLength() > offset) {
						char prevChar = document.getCharsSequence().charAt(offset);
						// Delete the paired right character sitting at the caret.
						if (prevChar == pairForChar.rightChar) {
							document.deleteString(offset, offset + 1);
							return true;
						}
					}
				}
			}
		}
		return false;
	}
}
| apache-2.0 |
NationalSecurityAgency/ghidra | Ghidra/Features/GhidraServer/src/main/java/ghidra/server/UserManager.java | 22938 | /* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.server;
import java.io.*;
import java.util.*;
import java.util.regex.Pattern;
import javax.security.auth.login.FailedLoginException;
import javax.security.auth.x500.X500Principal;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ghidra.framework.remote.User;
import ghidra.framework.store.local.LocalFileSystem;
import ghidra.util.*;
import ghidra.util.exception.DuplicateNameException;
/**
* <code>UserManager</code> manages the set of users associated with a running GhidraServer.
* Support is also provided for managing and authenticating local user passwords when
* needed.
*/
public class UserManager {
static final Logger log = LogManager.getLogger(UserManager.class);
public static final String X500_NAME_FORMAT = X500Principal.RFC2253;
public static final String ANONYMOUS_USERNAME = User.ANONYMOUS_USERNAME;
public static final String USER_PASSWORD_FILE = "users";
public static final String DN_LOG_FILE = "UnknownDN.log";
private static final String SSH_KEY_FOLDER = LocalFileSystem.HIDDEN_DIR_PREFIX + "ssh";
private static final String SSH_PUBKEY_EXT = ".pub";
private static final char[] DEFAULT_PASSWORD = "changeme".toCharArray();
private static final int DEFAULT_PASSWORD_TIMEOUT_DAYS = 1; // 24-hours
private static final int NO_EXPIRATION = -1;
private RepositoryManager repositoryMgr;
private final File userFile;
private final File sshDir;
private boolean enableLocalPasswords;
private long defaultPasswordExpirationMS;
private PrintWriter dnLogOut;
private LinkedHashMap<String, UserEntry> userList = new LinkedHashMap<>();
private HashMap<X500Principal, UserEntry> dnLookupMap = new HashMap<>();
private long lastUserListChange;
private boolean userListUpdateInProgress = false;
/**
* Construct server user manager
* @param repositoryMgr repository manager (used for queued command processing)
* @param enableLocalPasswords if true user passwords will be maintained
* within local 'users' file
* @param defaultPasswordExpirationDays password expiration in days when
* local passwords are enabled (0 = no expiration)
*/
	/**
	 * Construct server user manager
	 * @param repositoryMgr repository manager (used for queued command processing)
	 * @param enableLocalPasswords if true user passwords will be maintained
	 * within local 'users' file
	 * @param defaultPasswordExpirationDays password expiration in days when
	 * local passwords are enabled (0 = no expiration); a negative value
	 * selects the default of {@link #DEFAULT_PASSWORD_TIMEOUT_DAYS}
	 */
	UserManager(RepositoryManager repositoryMgr, boolean enableLocalPasswords,
			int defaultPasswordExpirationDays) {
		this.repositoryMgr = repositoryMgr;
		this.enableLocalPasswords = enableLocalPasswords;
		if (defaultPasswordExpirationDays < 0) {
			defaultPasswordExpirationDays = DEFAULT_PASSWORD_TIMEOUT_DAYS;
		}
		// convert days to milliseconds once up front
		this.defaultPasswordExpirationMS = defaultPasswordExpirationDays * 24L * 3600L * 1000L;
		log.info("Instantiating User Manager " +
			(enableLocalPasswords ? "(w/password management)" : ""));
		userFile = new File(repositoryMgr.getRootDir(), USER_PASSWORD_FILE);
		try {
			// everything must be constructed before processing commands
			updateUserList(false);
			log.info("User file contains " + userList.size() + " entries");
		}
		catch (FileNotFoundException e) {
			log.error("Existing User file not found.");
		}
		catch (IOException e) {
			log.error(e);
		}
		// log the known users and any associated X500 distinguished names
		log.info("Known Users:");
		Iterator<String> iter = userList.keySet().iterator();
		while (iter.hasNext()) {
			String name = iter.next();
			String dnStr = "";
			UserEntry entry = userList.get(name);
			if (entry != null) {
				X500Principal x500User = entry.x500User;
				if (x500User != null) {
					dnStr = " DN={" + x500User.getName() + "}";
				}
			}
			log.info(" " + name + dnStr);
		}
		sshDir = new File(repositoryMgr.getRootDir(), SSH_KEY_FOLDER);
		initSSH();
	}
private void initSSH() {
if (!sshDir.exists()) {
sshDir.mkdir();
return;
}
String[] list = sshDir.list((dir, name) -> name.endsWith(SSH_PUBKEY_EXT));
if (list.length == 0) {
return;
}
log.info("Users with stored SSH public key:");
for (String fname : list) {
String user = fname.substring(0, fname.length() - SSH_PUBKEY_EXT.length());
if (!userList.containsKey(user)) {
continue; // ignore invalid user
}
log.info(" " + user);
}
}
/**
* Get the SSH public key file for the specified user
* if it exists.
* @param user
* @return SSH public key file or null if key unavailable
*/
public File getSSHPubKeyFile(String user) {
if (!userList.containsKey(user)) {
return null;
}
File f = new File(sshDir, user + SSH_PUBKEY_EXT);
if (f.isFile()) {
return f;
}
return null;
}
	/**
	 * Add a user.
	 * @param username user name/SID
	 * @param passwordHash MD5 hash of initial password or null if explicit password reset required
	 * @param x500User X500 distinguished name for user (may be null)
	 * @throws DuplicateNameException if username already exists
	 * @throws IOException if IO error occurs
	 */
	private synchronized void addUser(String username, char[] passwordHash, X500Principal x500User)
			throws DuplicateNameException, IOException {
		if (username == null) {
			throw new IllegalArgumentException();
		}
		// refresh from disk before checking for duplicates
		updateUserList(true);
		if (userList.containsKey(username)) {
			throw new DuplicateNameException("User " + username + " already exists");
		}
		UserEntry entry = new UserEntry();
		entry.username = username;
		entry.passwordHash = passwordHash;
		// record creation time so a default password can expire
		entry.passwordTime = (new Date()).getTime();
		entry.x500User = x500User;
		userList.put(username, entry);
		if (x500User != null) {
			dnLookupMap.put(x500User, entry);
		}
		writeUserList();
	}
	/**
	 * Add a user with no explicit password hash.  When local passwords are
	 * enabled this results in the default password being assigned (see
	 * {@link #addUser(String, char[])}).
	 * @param username user name/SID
	 * @throws DuplicateNameException if username already exists
	 * @throws IOException if IO error occurs
	 */
	public void addUser(String username) throws DuplicateNameException, IOException {
		addUser(username, (char[]) null);
	}
/**
* Add a user with optional salted password hash.
* @param username user name/SID
* @param saltedPasswordHash optional user password hash (may be null)
* @throws DuplicateNameException if username already exists
* @throws IOException if IO error occurs
*/
void addUser(String username, char[] saltedPasswordHash)
throws DuplicateNameException, IOException {
if (saltedPasswordHash == null && enableLocalPasswords) {
saltedPasswordHash = getDefaultPasswordHash();
}
addUser(username, saltedPasswordHash, null);
}
/**
* Add a user.
* @param username user name/SID
* @param x500User X500 distinguished name for user (may be null)
* @throws DuplicateNameException if username already exists
* @throws IOException if IO error occurs
*/
public void addUser(String username, X500Principal x500User)
throws DuplicateNameException, IOException {
char[] passwordHash = enableLocalPasswords ? getDefaultPasswordHash() : null;
addUser(username, passwordHash, x500User);
}
/**
* Returns the X500 distinguished name for the specified user.
* @param username user name/SID
* @return X500 distinguished name
* @throws IOException
*/
public synchronized X500Principal getDistinguishedName(String username) throws IOException {
updateUserList(true);
UserEntry entry = userList.get(username);
if (entry != null) {
return entry.x500User;
}
return null;
}
/**
* Returns the username associated with the specified distinguished name
* @param x500User a user's X500 distinguished name
* @return username or null if not found
*/
public synchronized String getUserByDistinguishedName(X500Principal x500User)
throws IOException {
updateUserList(true);
UserEntry entry = dnLookupMap.get(x500User);
return entry != null ? entry.username : null;
}
	/**
	 * Sets the X500 distinguished name for a user
	 * @param username user name/SID
	 * @param x500User X500 distinguished name
	 * @return true if successful, false if user not found
	 * @throws IOException if IO error occurs
	 */
	public synchronized boolean setDistinguishedName(String username, X500Principal x500User)
			throws IOException {
		updateUserList(true);
		// replace the entry wholesale so both maps stay consistent
		UserEntry oldEntry = userList.remove(username);
		if (oldEntry != null) {
			if (oldEntry.x500User != null) {
				dnLookupMap.remove(oldEntry.x500User);
			}
			UserEntry entry = new UserEntry();
			entry.username = username;
			entry.passwordHash = oldEntry.passwordHash;
			entry.x500User = x500User;
			userList.put(username, entry);
			if (x500User != null) {
				dnLookupMap.put(x500User, entry);
			}
			// persist the change immediately
			writeUserList();
			return true;
		}
		return false;
	}
private void checkValidPasswordHash(char[] saltedPasswordHash) throws IOException {
if (saltedPasswordHash == null ||
saltedPasswordHash.length != HashUtilities.SHA256_SALTED_HASH_LENGTH) {
throw new IOException("Invalid password hash");
}
for (int i = 0; i < HashUtilities.SALT_LENGTH; i++) {
if (!isLetterOrDigit(saltedPasswordHash[i])) {
throw new IOException(
"Password set failed due invalid salt: " + (new String(saltedPasswordHash)) +
" (" + i + "," + saltedPasswordHash[i] + ")");
}
}
for (int i = HashUtilities.SALT_LENGTH; i < saltedPasswordHash.length; i++) {
if (!isLowercaseHexDigit(saltedPasswordHash[i])) {
throw new IOException(
"Password set failed due to invalid hash: " + (new String(saltedPasswordHash)) +
" (" + i + "," + saltedPasswordHash[i] + ")");
}
}
}
private boolean isLetterOrDigit(char c) {
if (c < '0') {
return false;
}
if (c > '9' && c < 'A') {
return false;
}
if (c > 'Z' && c < 'a') {
return false;
}
return c <= 'z';
}
private boolean isLowercaseHexDigit(char c) {
if (c < '0') {
return false;
}
if (c > '9' && c < 'a') {
return false;
}
return c <= 'f';
}
	/**
	 * Sets the local password hash for a user
	 * @param username user name/SID
	 * @param saltedSHA256PasswordHash 4-character salt followed by 64-hex digit SHA256 password hash for new password
	 * @param isTemporary if true password will be set to expire
	 * @return true if successful, false if user not found
	 * @throws IOException if local passwords are disabled, the hash is
	 * malformed, or an IO error occurs
	 */
	public synchronized boolean setPassword(String username, char[] saltedSHA256PasswordHash,
			boolean isTemporary) throws IOException {
		if (!enableLocalPasswords) {
			throw new IOException("Local passwords are not used");
		}
		checkValidPasswordHash(saltedSHA256PasswordHash);
		updateUserList(true);
		// replace the entry wholesale so both maps stay consistent
		UserEntry oldEntry = userList.remove(username);
		if (oldEntry != null) {
			UserEntry entry = new UserEntry();
			entry.username = username;
			entry.passwordHash = saltedSHA256PasswordHash;
			// NO_EXPIRATION marks a permanent (non-temporary) password
			entry.passwordTime = isTemporary ? (new Date()).getTime() : NO_EXPIRATION;
			entry.x500User = oldEntry.x500User;
			userList.put(username, entry);
			if (entry.x500User != null) {
				dnLookupMap.put(entry.x500User, entry);
			}
			writeUserList();
			return true;
		}
		return false;
	}
/**
* Returns true if local passwords are in use and can be changed by the user.
* @see #setPassword(String, char[])
*/
public boolean canSetPassword(String username) {
UserEntry userEntry = userList.get(username);
return (enableLocalPasswords && userEntry != null && userEntry.passwordHash != null);
}
	/**
	 * Returns the amount of time in milliseconds until the
	 * user's password will expire.
	 * @param username user name
	 * @return time until expiration or -1 if it will not expire
	 * @throws IOException if IO error occurs
	 */
	public long getPasswordExpiration(String username) throws IOException {
		updateUserList(true);
		UserEntry userEntry = userList.get(username);
		// indicate immediate expiration for users with short hash (non salted SHA-256)
		if (userEntry != null && userEntry.passwordHash != null &&
			userEntry.passwordHash.length != HashUtilities.SHA256_SALTED_HASH_LENGTH) {
			return 0;
		}
		return getPasswordExpiration(userEntry);
	}
	/**
	 * Returns the amount of time in milliseconds until the
	 * user's password will expire.
	 * @param user user entry (may be null, in which case 0 is returned)
	 * @return time until expiration or -1 if it will not expire
	 */
	private long getPasswordExpiration(UserEntry user) {
		long timeRemaining = 0;
		if (user != null) {
			// Expiration only applies to default password
			if (defaultPasswordExpirationMS == 0 || user.passwordTime == NO_EXPIRATION) {
				return -1;
			}
			// passwordTime == 0 means already cleared/expired
			if (user.passwordTime != 0) {
				timeRemaining =
					defaultPasswordExpirationMS - ((new Date()).getTime() - user.passwordTime);
				if (timeRemaining <= 0) {
					timeRemaining = 0;
				}
			}
		}
		return timeRemaining;
	}
/**
* Reset the local password to the 'changeme' for the specified user.
* @param username
* @param saltedPasswordHash optional user password hash (may be null)
* @return true if password updated successfully.
* @throws IOException
*/
public boolean resetPassword(String username, char[] saltedPasswordHash) throws IOException {
if (!enableLocalPasswords) {
return false;
}
return setPassword(username,
saltedPasswordHash != null ? saltedPasswordHash : getDefaultPasswordHash(), true);
}
	/**
	 * Produce a freshly-salted SHA-256 hash of the default password.
	 */
	private char[] getDefaultPasswordHash() {
		return HashUtilities.getSaltedHash(HashUtilities.SHA256_ALGORITHM, DEFAULT_PASSWORD);
	}
/**
* Remove the specified user from the server access list
* @param username user name/SID
* @throws IOException
*/
public synchronized void removeUser(String username) throws IOException {
updateUserList(true);
UserEntry oldEntry = userList.remove(username);
if (oldEntry != null) {
if (oldEntry.x500User != null) {
dnLookupMap.remove(oldEntry.x500User);
}
writeUserList();
}
}
/**
 * Get list of all users known to server.
 * @return array of known user names
 * @throws IOException if the user file cannot be read
 */
public synchronized String[] getUsers() throws IOException {
    updateUserList(true);
    // Collection.toArray replaces the original manual iterator/index loop
    return userList.keySet().toArray(new String[0]);
}
/**
 * Refresh the server's user list and process any pending UserAdmin commands.
 * The userListUpdateInProgress flag guards against re-entry (presumably
 * command processing can call back into this method on the same thread --
 * TODO confirm against UserAdmin.processCommands).
 * @param processCmds true to also process queued UserAdmin commands
 * @throws IOException if the user file cannot be read or written
 */
synchronized void updateUserList(boolean processCmds) throws IOException {
    if (userListUpdateInProgress) {
        return; // an update is already running further up the call stack
    }
    userListUpdateInProgress = true;
    try {
        readUserListIfNeeded();
        clearExpiredPasswords();
        if (processCmds) {
            UserAdmin.processCommands(repositoryMgr);
        }
    }
    finally {
        // always clear the guard, even if the refresh fails
        userListUpdateInProgress = false;
    }
}
/**
 * Clear all local user passwords which have expired.
 * @throws IOException if the updated user file cannot be written
 */
private void clearExpiredPasswords() throws IOException {
    if (defaultPasswordExpirationMS == 0) {
        return; // password expiration is disabled
    }
    boolean dataChanged = false;
    for (UserEntry entry : userList.values()) {
        // Skip entries whose password was already cleared: without the
        // passwordHash != null check, an entry with a null hash and
        // passwordTime == 0 reports expiration 0 on every refresh, which
        // repeatedly logged the warning and rewrote the user file.
        if (enableLocalPasswords && entry.passwordHash != null &&
            getPasswordExpiration(entry) == 0) {
            entry.passwordHash = null;
            entry.passwordTime = 0;
            dataChanged = true;
            log.warn("Default password expired for user '" + entry.username + "'");
        }
    }
    if (dataChanged) {
        writeUserList();
    }
}
/**
 * Read user data from file if the timestamp on the file has changed.
 * Fresh maps are built first and only swapped in (by reference
 * assignment) after a successful read, so a failed read leaves the
 * previous in-memory state intact.
 *
 * @throws IOException if the user file cannot be read or created
 */
private void readUserListIfNeeded() throws IOException {
    long lastMod = userFile.lastModified();
    if (lastUserListChange == lastMod) {
        if (lastMod == 0) {
            // lastModified() returns 0 when the file does not exist:
            // create empty file if it does not yet exist
            writeUserList();
        }
        return;
    }
    LinkedHashMap<String, UserEntry> list = new LinkedHashMap<>();
    HashMap<X500Principal, UserEntry> lookupMap = new HashMap<>();
    readUserList(userFile, list, lookupMap);
    userList = list;
    dnLookupMap = lookupMap;
    lastUserListChange = lastMod;
}
/**
 * Print to stdout the set of user names with access to the specified
 * repositories root. This is intended to be used with the svrAdmin
 * console command.
 * @param repositoriesRootDir repositories root directory
 */
static void listUsers(File repositoriesRootDir) {
    File userFile = new File(repositoriesRootDir, USER_PASSWORD_FILE);
    LinkedHashMap<String, UserEntry> users = new LinkedHashMap<>();
    HashMap<X500Principal, UserEntry> dnMap = new HashMap<>();
    try {
        readUserList(userFile, users, dnMap);
    }
    catch (IOException e) {
        System.out.println("\nFailed to read user file: " + e.getMessage());
        return;
    }
    System.out.println("\nRepository Server Users:");
    if (users.isEmpty()) {
        System.out.println(" <No users have been added>");
        return;
    }
    for (String name : users.keySet()) {
        System.out.println(" " + name);
    }
}
/**
 * Parse user entries from the password file into the supplied maps.
 * Each non-comment line has the colon-separated form:
 *   username[:passwordHash[:passwordTime[:distinguishedName]]]
 * where passwordTime is a hex long, or '*' for no expiration.
 * Lines starting with '#' are comments; entries with an invalid user
 * name, or lines with no tokens at all, are skipped.
 * @param file user password file to read
 * @param usersIndexByName map to populate, keyed by user name
 * @param x500LookupMap map to populate, keyed by X500 distinguished name
 * @throws IOException if the file cannot be read
 */
private static void readUserList(File file, Map<String, UserEntry> usersIndexByName,
        Map<X500Principal, UserEntry> x500LookupMap) throws IOException {
    try (BufferedReader br = new BufferedReader(new FileReader(file))) {
        String line;
        while ((line = br.readLine()) != null) {
            if (line.startsWith("#")) {
                continue; // comment line
            }
            try {
                StringTokenizer st = new StringTokenizer(line, ":");
                UserEntry entry = new UserEntry();
                entry.username = st.nextToken();
                if (!isValidUserName(entry.username)) {
                    log.error("Invalid user name, skipping: " + entry.username);
                    continue;
                }
                // Password Hash
                if (st.hasMoreTokens()) {
                    entry.passwordHash = st.nextToken().toCharArray();
                    // Password Time
                    if (st.hasMoreTokens()) {
                        try {
                            String timeStr = st.nextToken();
                            if ("*".equals(timeStr)) {
                                entry.passwordTime = NO_EXPIRATION;
                            }
                            else {
                                entry.passwordTime = NumericUtilities.parseHexLong(timeStr);
                            }
                        }
                        catch (NumberFormatException e) {
                            // unparsable time: treat the password as expired
                            log.error(
                                "Invalid password time - forced expiration: " + entry.username);
                            entry.passwordTime = 0;
                        }
                        // Distinguished Name
                        if (st.hasMoreTokens()) {
                            String dn = st.nextToken();
                            if (dn.length() > 0) {
                                entry.x500User = new X500Principal(dn);
                            }
                        }
                    }
                }
                usersIndexByName.put(entry.username, entry);
                if (entry.x500User != null) {
                    x500LookupMap.put(entry.x500User, entry);
                }
            }
            catch (NoSuchElementException e) {
                // skip entry - line contained no tokens
            }
        }
    }
}
/**
 * Write user data to file, one colon-separated line per user:
 *   username:passwordHash:passwordTime[:distinguishedName]
 * A user without a password is written with the "*:*" placeholder,
 * a passwordTime of NO_EXPIRATION is written as '*', and any other
 * time is written in hex. Updates lastUserListChange afterwards so
 * this fresh write is not re-read by readUserListIfNeeded.
 * @throws IOException if the file cannot be written
 */
private void writeUserList() throws IOException {
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(userFile))) {
        for (UserEntry entry : userList.values()) {
            bw.write(entry.username);
            bw.write(":");
            if (entry.passwordHash != null) {
                bw.write(entry.passwordHash);
                bw.write(':');
                if (entry.passwordTime == NO_EXPIRATION) {
                    bw.write('*');
                }
                else {
                    bw.write(Long.toHexString(entry.passwordTime));
                }
            }
            else {
                // no password set
                bw.write("*:*");
            }
            if (entry.x500User != null) {
                bw.write(":");
                bw.write(entry.x500User.getName());
            }
            bw.newLine();
        }
    }
    lastUserListChange = userFile.lastModified();
}
/**
 * Returns true if the specified user is known to server.
 * A failure to refresh the user list is deliberately ignored; the
 * answer is then based on the most recently loaded list.
 * @param username user name/SID
 * @return true if the user is present in the user list
 */
public synchronized boolean isValidUser(String username) {
    try {
        updateUserList(true);
    }
    catch (IOException e) {
        // ignore - fall back to the current in-memory list
    }
    return userList.containsKey(username);
}
/**
 * Verify that the specified password corresponds to the local
 * password set for the specified user. The stored hash's length selects
 * the scheme: legacy unsalted MD5, salted MD5 (deprecated), or salted
 * SHA-256. For the salted schemes the salt is the leading SALT_LENGTH
 * characters of the stored hash.
 * @param username user name/SID
 * @param password password data
 * @throws IOException if the user file cannot be read
 * @throws FailedLoginException if authentication fails
 */
public synchronized void authenticateUser(String username, char[] password)
        throws IOException, FailedLoginException {
    if (username == null || password == null) {
        throw new FailedLoginException("Invalid authentication data");
    }
    updateUserList(true);
    UserEntry entry = userList.get(username);
    if (entry == null) {
        throw new FailedLoginException("Unknown user: " + username);
    }
    // A hash shorter than the smallest supported digest cannot be valid
    if (entry.passwordHash == null ||
        entry.passwordHash.length < HashUtilities.MD5_UNSALTED_HASH_LENGTH) {
        throw new FailedLoginException("User password not set, must be reset");
    }
    // Support deprecated unsalted hash
    if (entry.passwordHash.length == HashUtilities.MD5_UNSALTED_HASH_LENGTH && Arrays.equals(
        HashUtilities.getHash(HashUtilities.MD5_ALGORITHM, password), entry.passwordHash)) {
        return;
    }
    // Salted hashes carry the salt as their leading SALT_LENGTH chars
    char[] salt = new char[HashUtilities.SALT_LENGTH];
    System.arraycopy(entry.passwordHash, 0, salt, 0, HashUtilities.SALT_LENGTH);
    if (entry.passwordHash.length == HashUtilities.MD5_SALTED_HASH_LENGTH) {
        if (!Arrays.equals(
            HashUtilities.getSaltedHash(HashUtilities.MD5_ALGORITHM, salt, password),
            entry.passwordHash)) {
            throw new FailedLoginException("Incorrect password");
        }
    }
    else if (entry.passwordHash.length == HashUtilities.SHA256_SALTED_HASH_LENGTH) {
        if (!Arrays.equals(
            HashUtilities.getSaltedHash(HashUtilities.SHA256_ALGORITHM, salt, password),
            entry.passwordHash)) {
            throw new FailedLoginException("Incorrect password");
        }
    }
    else {
        // unrecognized hash length - require a password reset
        throw new FailedLoginException("User password not set, must be reset");
    }
}
/**
 * <code>UserEntry</code> class used to hold user data
 */
private static class UserEntry {
    private String username; // user name/SID
    private X500Principal x500User; // optional distinguished name (null if none)
    private char[] passwordHash; // stored password hash, or null if no password set
    private long passwordTime; // time base for expiration; 0 if unset, or NO_EXPIRATION
}
/**
 * Lazily open the distinguished-name log (append mode, auto-flush)
 * located next to the user file.
 * @return print writer for the DN log
 * @throws IOException if the log file cannot be opened
 */
private PrintWriter getDNLog() throws IOException {
    if (dnLogOut == null) {
        File dnLog = new File(userFile.getParentFile(), DN_LOG_FILE);
        dnLogOut = new PrintWriter(new FileOutputStream(dnLog, true), true);
    }
    return dnLogOut;
}
/**
 * Log a new or unknown X500 principal to facilitate future addition to
 * user file. Failures to write the log are intentionally ignored.
 * @param username user name/SID which corresponds to unknown principal
 * @param principal X500 principal data which contains user's distinguished name
 */
public void logUnknownDN(String username, X500Principal principal) {
    try {
        PrintWriter out = getDNLog();
        out.println(username + "; " + principal);
    }
    catch (IOException e) {
        // best effort - ignore logging failures
    }
}
/*
 * Regex: matches if the entire string is alpha, digit, ".", "-", "_", fwd or back slash.
 */
private static final Pattern VALID_USERNAME_REGEX = Pattern.compile("[a-zA-Z0-9.\\-_/\\\\]+");

/**
 * Ensures a name only contains valid characters and meets length limitations.
 * A null string is not a valid name.
 *
 * @param s name string (may be null)
 * @return boolean true if valid name, false if not valid
 */
public static boolean isValidUserName(String s) {
    // Guard against null to avoid a NullPointerException from the matcher
    if (s == null) {
        return false;
    }
    // Check the cheap length limit before running the regex
    return s.length() <= NamingUtilities.MAX_NAME_LENGTH &&
        VALID_USERNAME_REGEX.matcher(s).matches();
}
}
| apache-2.0 |
kcudnik/bridgegooglemaps | Animation.cs | 634 | namespace Bridge.Google.Maps
{
/// <summary>
/// Animations that can be played on a marker. Use the setAnimation method
/// on Marker or the animation option to play an animation.
/// </summary>
[External]
[Enum(Emit.Name)]
[Namespace("google.maps")]
public enum Animation
{
    /// <summary>
    /// Marker bounces repeatedly until the animation is stopped.
    /// </summary>
    [Name("BOUNCE")]
    Bounce,

    /// <summary>
    /// Marker falls from the top of the map, ending with a small bounce.
    /// </summary>
    [Name("DROP")]
    Drop
}
} | apache-2.0 |
niqo01/monkey | src/internalDebug/java/com/monkeysarmy/fit/ui/debug/AnimationSpeedAdapter.java | 1571 | package com.monkeysarmy.fit.ui.debug;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import com.monkeysarmy.fit.ui.misc.BindableAdapter;
import static butterknife.ButterKnife.findById;
/** Spinner adapter offering a fixed set of animation slow-down factors. */
class AnimationSpeedAdapter extends BindableAdapter<Integer> {
  /** Supported slow-down factors, in display order. */
  private static final int[] VALUES = {
      1, 2, 3, 5, 10
  };

  /** Returns the adapter position of {@code value}, or 0 (normal speed) when absent. */
  public static int getPositionForValue(int value) {
    for (int index = 0; index < VALUES.length; index++) {
      if (VALUES[index] == value) {
        return index;
      }
    }
    return 0; // Default to 1x if something changes.
  }

  AnimationSpeedAdapter(Context context) {
    super(context);
  }

  @Override public int getCount() {
    return VALUES.length;
  }

  @Override public Integer getItem(int position) {
    return VALUES[position];
  }

  @Override public long getItemId(int position) {
    return position;
  }

  @Override public View newView(LayoutInflater inflater, int position, ViewGroup container) {
    return inflater.inflate(android.R.layout.simple_spinner_item, container, false);
  }

  @Override public void bindView(Integer item, int position, View view) {
    TextView speedLabel = findById(view, android.R.id.text1);
    speedLabel.setText(item == 1 ? "Normal" : item + "x slower");
  }

  @Override
  public View newDropDownView(LayoutInflater inflater, int position, ViewGroup container) {
    return inflater.inflate(android.R.layout.simple_spinner_dropdown_item, container, false);
  }
}
| apache-2.0 |
victorluissantos/OO-PHP | revisao/config/database.class.php | 1048 | <?php
/**
* @see:Classe DataBase, responsável por gerenciar conexão(s) com N host ou base
*/
class Database{
	// Database credentials used for every connection.
	// NOTE(review): credentials are hardcoded in source - consider moving
	// them to configuration outside version control.
	const USER = 'root';
	const PASSWORD = 'elaborata';

	/* Attribute controlling the single (singleton) instance of the class */
	private static $instancia;
	/**
	 * Constructor is intentionally empty; the commented-out call below
	 * suggests the connection was once opened here. Connections are now
	 * created in checkConnect().
	 */
	public function __construct()
	{
		//$this->conecta($usuario);
	}
/**
* @see: Metodo Singleton de controle unico a new
*/
public static function conecta()
{
try {
if(!isset(self::$instancia))
{
return self::$instancia = new Database();
}
return self::$instancia;
} catch (Exception $e) {
throw new Exception($e->getMessage());
}
}
/**
* @see: Metodo responsavel por verificar conexão com banco de dados
* @return: (bool)True or False
*/
public function checkConnect()
{
try {
$dsn = 'mysql:host=localhost;dbname=elaborata';
//new PDO = cria objeto do tipo PDO
$bd = new PDO($dsn, self::USER, self::PASSWORD);
if($bd)
{
return true;
}
return false;
} catch (Exception $e) {
throw new Exception($e->getMessage());
}
}
} | apache-2.0 |
odesk/php-odesk | src/oDesk/API/Routers/Activities/Team.php | 4600 | <?php
/**
* oDesk auth library for using with public API by OAuth
* Get oTask/Activity records within a team
*
* @final
* @package oDeskAPI
* @since 05/21/2014
* @copyright Copyright 2014(c) oDesk.com
* @author Maksym Novozhylov <mnovozhilov@odesk.com>
* @license oDesk's API Terms of Use {@link https://developers.odesk.com/api-tos.html}
*/
namespace oDesk\API\Routers\Activities;
use oDesk\API\Debug as ApiDebug;
use oDesk\API\Client as ApiClient;
/**
* Get an oTask/Activity records within a team
*
* @link http://developers.odesk.com/oTasks-API
*/
final class Team extends ApiClient
{
const ENTRY_POINT = ODESK_API_EP_NAME;
/**
* @var Client instance
*/
private $_client;
/**
* Constructor
*
* @param ApiClient $client Client object
*/
public function __construct(ApiClient $client)
{
ApiDebug::p('init ' . __CLASS__ . ' router');
$this->_client = $client;
parent::$_epoint = self::ENTRY_POINT;
}
/**
* Get by type
*
* @param string $company Company ID
* @param string $team Team ID
* @param string $code (Optional) Code(s)
* @return object
*/
private function _getByType($company, $team, $code = null)
{
ApiDebug::p(__FUNCTION__);
$_url = '';
if (!empty($code)) {
$_url = '/' . $code;
}
$response = $this->_client->get('/otask/v1/tasks/companies/' . $company . '/teams/' . $team . '/tasks' . $_url);
ApiDebug::p('found response info', $response);
return $response;
}
/**
* List all oTask/Activity records within a Team
*
* @param string $company Company ID
* @param string $team Team ID
* @return object
*/
public function getList($company, $team)
{
ApiDebug::p(__FUNCTION__);
return $this->_getByType($company, $team);
}
/**
* List all oTask/Activity records within a Team by specified code(s)
*
* @param string $company Company ID
* @param string $team Team ID
* @param string $code Specific code(s)
* @return object
*/
public function getSpecificList($company, $team, $code)
{
ApiDebug::p(__FUNCTION__);
return $this->_getByType($company, $team, $code);
}
/**
* Create an oTask/Activity record within a Team
*
* @param string $company Company ID
* @param string $team Team ID
* @param array $params Parameters
* @return object
*/
public function addActivity($company, $team, $params)
{
ApiDebug::p(__FUNCTION__);
$response = $this->_client->post('/otask/v1/tasks/companies/' . $company . '/teams/' . $team . '/tasks', $params);
ApiDebug::p('found response info', $response);
return $response;
}
/**
* Update specific oTask/Activity record within a Team
*
* @param string $company Company ID
* @param string $team Team ID
* @param string $code Specific code
* @param array $params Parameters
* @return object
*/
public function updateActivity($company, $team, $code, $params)
{
ApiDebug::p(__FUNCTION__);
$response = $this->_client->put('/otask/v1/tasks/companies/' . $company . '/teams/' . $team . '/tasks/' . $code, $params);
ApiDebug::p('found response info', $response);
return $response;
}
/**
* Archive specific oTask/Activity record within a Team
*
* @param string $company Company ID
* @param string $team Team ID
* @param string $code Specific code(s)
* @return object
*/
public function archiveActivities($company, $team, $code)
{
ApiDebug::p(__FUNCTION__);
$response = $this->_client->put('/otask/v1/tasks/companies/' . $company . '/teams/' . $team . '/archive/' . $code);
ApiDebug::p('found response info', $response);
return $response;
}
/**
* Unarchive specific oTask/Activity record within a Team
*
* @param string $company Company ID
* @param string $team Team ID
* @param string $code Specific code(s)
* @return object
*/
public function unarchiveActivities($company, $team, $code)
{
ApiDebug::p(__FUNCTION__);
$response = $this->_client->put('/otask/v1/tasks/companies/' . $company . '/teams/' . $team . '/unarchive/' . $code);
ApiDebug::p('found response info', $response);
return $response;
}
}
| apache-2.0 |
google/tink | cc/prf/hmac_prf_key_manager.cc | 3759 | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
#include "tink/prf/hmac_prf_key_manager.h"
#include <set>
#include "absl/status/status.h"
#include "tink/subtle/common_enums.h"
#include "tink/util/enums.h"
#include "tink/util/input_stream_util.h"
#include "tink/util/status.h"
#include "tink/util/statusor.h"
#include "proto/hmac_prf.pb.h"
namespace crypto {
namespace tink {
namespace {
// Minimum acceptable HMAC-PRF key size; keys and key formats below this
// are rejected by ValidateKey/ValidateKeyFormat.
constexpr int kMinKeySizeInBytes = 16;
}  // namespace
using google::crypto::tink::HmacPrfKey;
using google::crypto::tink::HmacPrfKeyFormat;
using google::crypto::tink::HmacPrfParams;
using subtle::HashType;
using util::Enums;
using util::Status;
using util::StatusOr;
// Validates an HmacPrfKey: the version must be supported, the raw key
// material must be at least kMinKeySizeInBytes, and the hash params must
// name a supported hash type.
util::Status HmacPrfKeyManager::ValidateKey(const HmacPrfKey& key) const {
  util::Status status = ValidateVersion(key.version(), get_version());
  if (!status.ok()) return status;
  if (key.key_value().size() < kMinKeySizeInBytes) {
    return util::Status(absl::StatusCode::kInvalidArgument,
                        "Invalid HmacPrfKey: key_value wrong length.");
  }
  return ValidateParams(key.params());
}
// Validates an HmacPrfKeyFormat: the version must be supported, the
// requested key size must be at least kMinKeySizeInBytes, and the hash
// params must name a supported hash type.
util::Status HmacPrfKeyManager::ValidateKeyFormat(
    const HmacPrfKeyFormat& key_format) const {
  util::Status status = ValidateVersion(key_format.version(), get_version());
  if (!status.ok()) return status;
  if (key_format.key_size() < kMinKeySizeInBytes) {
    return util::Status(absl::StatusCode::kInvalidArgument,
                        "Invalid HmacPrfKeyFormat: invalid key_size.");
  }
  return ValidateParams(key_format.params());
}
// Creates a fresh HmacPrfKey: random key material of the requested size,
// current key version, and params copied from the key format.
// Note: the format is not validated here; callers are expected to have
// validated it already (TODO confirm against the key manager framework).
crypto::tink::util::StatusOr<HmacPrfKey> HmacPrfKeyManager::CreateKey(
    const HmacPrfKeyFormat& key_format) const {
  HmacPrfKey key;
  key.set_version(get_version());
  key.set_key_value(subtle::Random::GetRandomBytes(key_format.key_size()));
  *(key.mutable_params()) = key_format.params();
  return key;
}
// Deterministically derives an HmacPrfKey by validating the key format and
// then reading exactly key_size bytes of key material from input_stream.
// Propagates any validation or read error to the caller.
StatusOr<HmacPrfKey> HmacPrfKeyManager::DeriveKey(
    const HmacPrfKeyFormat& hmac_prf_key_format,
    InputStream* input_stream) const {
  crypto::tink::util::Status status = ValidateKeyFormat(hmac_prf_key_format);
  if (!status.ok()) return status;
  crypto::tink::util::StatusOr<std::string> randomness =
      ReadBytesFromStream(hmac_prf_key_format.key_size(), input_stream);
  if (!randomness.status().ok()) {
    return randomness.status();
  }
  HmacPrfKey key;
  key.set_version(get_version());
  *(key.mutable_params()) = hmac_prf_key_format.params();
  key.set_key_value(randomness.ValueOrDie());
  return key;
}
// Checks that the hash function in `params` is one of the hash types
// supported for HMAC-PRF: SHA1, SHA224, SHA256, SHA384 or SHA512.
Status HmacPrfKeyManager::ValidateParams(const HmacPrfParams& params) const {
  switch (Enums::ProtoToSubtle(params.hash())) {
    case HashType::SHA1:
    case HashType::SHA224:
    case HashType::SHA256:
    case HashType::SHA384:
    case HashType::SHA512:
      return util::OkStatus();
    default:
      return ToStatusF(absl::StatusCode::kInvalidArgument,
                       "Invalid HmacParams: HashType '%s' not supported.",
                       Enums::HashName(params.hash()));
  }
}
} // namespace tink
} // namespace crypto
| apache-2.0 |