code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2017 Global Security Experts Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require_relative '../app_helper'
# Validation specs for the Hostname model.
RSpec.describe Hostname do
# Fixtures: two hosts in the same site, and two hostnames attached to the
# first host (used for uniqueness checks below).
let(:site) { create(:site) }
let(:host) { create(:host, site: site) }
let(:other_host) { create(:host, :other, site: site) }
let(:hostname) { create(:hostname, host: host) }
let(:other_hostname) { create(:hostname, :other, host: host) }
describe 'validation' do
subject { hostname }
# Factory-built records should be valid as-is.
it { is_expected.to be_valid }
it { expect(other_hostname).to be_valid }
describe 'host_id' do
# host_id is mandatory.
it 'should not be nil' do
hostname.host_id = nil
is_expected.not_to be_valid
end
# host_id must reference an existing Host row (max id + 1 cannot exist).
it 'should exist' do
hostname.host_id = Host.maximum(:id) + 1
is_expected.not_to be_valid
end
end
describe 'name' do
# name must be present and non-empty.
it 'should not be blank' do
hostname.name = nil
is_expected.not_to be_valid
hostname.name = ''
is_expected.not_to be_valid
hostname.name = 'test.name'
is_expected.to be_valid
end
# Uniqueness is scoped per host: duplicate within the same host is invalid...
it 'should be unique in host' do
hostname.name = other_hostname.name
is_expected.not_to be_valid
end
# ...but the same name on a different host is allowed.
it 'is allowed to give same name to different hosts' do
hostname.name = other_hostname.name
hostname.host = other_host
is_expected.to be_valid
end
end
end
end
| gsx-lab/caras-framework | spec/models/hostname_spec.rb | Ruby | apache-2.0 | 1,898 |
import React from 'react';
// eslint-disable-next-line import/no-extraneous-dependencies
import { mount } from 'enzyme';
// eslint-disable-next-line import/no-extraneous-dependencies
import { FacetedSearchIcon } from './FacetedSearchIcon.component';
import getDefaultT from '../../translate';
const t = getDefaultT();
// Unit tests for the FacetedSearchIcon toggle button: default rendering,
// active-state class names, click handling, and loading mode.
describe('FacetedSearchIcon', () => {
it('should render by default', () => {
// given
const props = {
onClick: jest.fn(),
t,
};
// when
const wrapper = mount(<FacetedSearchIcon {...props} />);
// then
expect(wrapper.html()).toMatchSnapshot();
});
it('should render with button active when active props true', () => {
// given
const props = {
active: true,
onClick: jest.fn(),
t,
};
// when
const wrapper = mount(<FacetedSearchIcon {...props} />);
// then
// The "theme-active active" classes should be present when active=true.
expect(wrapper.find('button[aria-label="Show faceted search"]').prop('className')).toEqual(
'faceted-search-icon theme-faceted-search-icon tc-icon-toggle theme-tc-icon-toggle theme-active active btn btn-link',
);
});
it('should call onClick when trigger click', () => {
// given
const onClick = jest.fn();
const props = {
active: true,
onClick,
t,
};
// when
const wrapper = mount(<FacetedSearchIcon {...props} />);
wrapper.find('button[aria-label="Show faceted search"]').simulate('click');
// then
// Exactly one invocation expected per click.
expect(onClick).toHaveBeenCalled();
expect(onClick.mock.calls.length).toBe(1);
});
it('should render the button in loading mode', () => {
// given
const props = {
loading: true,
onClick: jest.fn(),
t,
};
// when
const wrapper = mount(<FacetedSearchIcon {...props} />);
// then
expect(wrapper.html()).toMatchSnapshot();
});
});
| Talend/ui | packages/faceted-search/src/components/FacetedSearchIcon/FacetedSearchIcon.component.test.js | JavaScript | apache-2.0 | 1,710 |
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
daemonconfig "github.com/docker/docker/daemon/config"
"github.com/docker/docker/oci"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/mount"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/devices"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const (
// inContainerInitPath is the path inside the container where the
// docker-init binary is bind-mounted when --init is enabled.
inContainerInitPath = "/sbin/" + daemonconfig.DefaultInitBinary
)
// setResources populates s.Linux.Resources from the container's resource
// limits: block-IO weight and per-device read/write bps/IOPS throttles,
// memory, CPU, and pids limits. Device cgroup rules already present on the
// spec are carried over into the new resources struct.
func setResources(s *specs.Spec, r containertypes.Resources) error {
weightDevices, err := getBlkioWeightDevices(r)
if err != nil {
return err
}
readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps)
if err != nil {
return err
}
writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps)
if err != nil {
return err
}
readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps)
if err != nil {
return err
}
writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps)
if err != nil {
return err
}
memoryRes := getMemoryResources(r)
cpuRes, err := getCPUResources(r)
if err != nil {
return err
}
blkioWeight := r.BlkioWeight
specResources := &specs.LinuxResources{
Memory: memoryRes,
CPU: cpuRes,
BlockIO: &specs.LinuxBlockIO{
Weight: &blkioWeight,
WeightDevice: weightDevices,
ThrottleReadBpsDevice: readBpsDevice,
ThrottleWriteBpsDevice: writeBpsDevice,
ThrottleReadIOPSDevice: readIOpsDevice,
ThrottleWriteIOPSDevice: writeIOpsDevice,
},
Pids: &specs.LinuxPids{
Limit: r.PidsLimit,
},
}
// Preserve device cgroup rules that were already set on the spec; the
// fresh LinuxResources above would otherwise silently drop them.
if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 {
specResources.Devices = s.Linux.Resources.Devices
}
s.Linux.Resources = specResources
return nil
}
// setDevices populates the spec's device nodes and device cgroup rules.
// Privileged containers receive every host device plus a blanket "rwm"
// allow rule; otherwise only devices explicitly mapped in the HostConfig
// (and any HostConfig device cgroup rules) are exposed.
func setDevices(s *specs.Spec, c *container.Container) error {
// Build lists of devices allowed and created within the container.
var devs []specs.LinuxDevice
devPermissions := s.Linux.Resources.Devices
if c.HostConfig.Privileged {
hostDevices, err := devices.HostDevices()
if err != nil {
return err
}
for _, d := range hostDevices {
devs = append(devs, oci.Device(d))
}
// Single wildcard rule: allow read/write/mknod on every device.
devPermissions = []specs.LinuxDeviceCgroup{
{
Allow: true,
Access: "rwm",
},
}
} else {
for _, deviceMapping := range c.HostConfig.Devices {
d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions)
if err != nil {
return err
}
devs = append(devs, d...)
devPermissions = append(devPermissions, dPermissions...)
}
var err error
devPermissions, err = appendDevicePermissionsFromCgroupRules(devPermissions, c.HostConfig.DeviceCgroupRules)
if err != nil {
return err
}
}
s.Linux.Devices = append(s.Linux.Devices, devs...)
s.Linux.Resources.Devices = devPermissions
return nil
}
// setRlimits translates the container's ulimits (merged with the daemon's
// defaults) into POSIX rlimit entries on the spec's process. The stored
// HostConfig itself is never modified.
func (daemon *Daemon) setRlimits(s *specs.Spec, c *container.Container) error {
	// Merge daemon defaults into a copy so the container's HostConfig
	// stays untouched.
	hc := *c.HostConfig
	daemon.mergeUlimits(&hc)

	var limits []specs.POSIXRlimit
	for _, ul := range hc.Ulimits {
		limits = append(limits, specs.POSIXRlimit{
			Type: "RLIMIT_" + strings.ToUpper(ul.Name),
			Soft: uint64(ul.Soft),
			Hard: uint64(ul.Hard),
		})
	}
	s.Process.Rlimits = limits
	return nil
}
// setUser resolves the container's configured user string into a numeric
// uid/gid plus supplementary gids (via getUser) and stores them on the
// spec's process.
func setUser(s *specs.Spec, c *container.Container) error {
uid, gid, additionalGids, err := getUser(c, c.Config.User)
if err != nil {
return err
}
s.Process.User.UID = uid
s.Process.User.GID = gid
s.Process.User.AdditionalGids = additionalGids
return nil
}
// readUserFile opens the file at container-relative path p (for example
// /etc/passwd), resolved against the container's rootfs. The caller is
// responsible for closing the returned ReadCloser.
func readUserFile(c *container.Container, p string) (io.ReadCloser, error) {
	resolved, err := c.GetResourcePath(p)
	if err != nil {
		return nil, err
	}
	return os.Open(resolved)
}
// getUser resolves username (any form accepted by libcontainer's user
// package: name, uid, "user:group", ...) against the container's own
// passwd and group files, and additionally resolves HostConfig.GroupAdd
// into supplementary group ids. Returns uid, gid and supplementary gids.
func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) {
passwdPath, err := user.GetPasswdPath()
if err != nil {
return 0, 0, nil, err
}
groupPath, err := user.GetGroupPath()
if err != nil {
return 0, 0, nil, err
}
// A missing or unreadable passwd/group file is tolerated: the reader
// stays nil and resolution falls back to numeric ids where possible.
passwdFile, err := readUserFile(c, passwdPath)
if err == nil {
defer passwdFile.Close()
}
groupFile, err := readUserFile(c, groupPath)
if err == nil {
defer groupFile.Close()
}
execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile)
if err != nil {
return 0, 0, nil, err
}
// todo: fix this double read by a change to libcontainer/user pkg
// (GetExecUser consumed the reader above, so re-open the group file
// before resolving the additional groups).
groupFile, err = readUserFile(c, groupPath)
if err == nil {
defer groupFile.Close()
}
var addGroups []int
if len(c.HostConfig.GroupAdd) > 0 {
addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile)
if err != nil {
return 0, 0, nil, err
}
}
uid := uint32(execUser.Uid)
gid := uint32(execUser.Gid)
sgids := append(execUser.Sgids, addGroups...)
var additionalGids []uint32
for _, g := range sgids {
additionalGids = append(additionalGids, uint32(g))
}
return uid, gid, additionalGids, nil
}
// setNamespace inserts ns into the spec's namespace list, replacing any
// existing entry of the same type so that each namespace type appears at
// most once.
func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) {
	for i := range s.Linux.Namespaces {
		if s.Linux.Namespaces[i].Type == ns.Type {
			s.Linux.Namespaces[i] = ns
			return
		}
	}
	s.Linux.Namespaces = append(s.Linux.Namespaces, ns)
}
// setNamespaces configures the user, network, ipc, pid and uts namespaces
// on the spec from the container's HostConfig modes. Joining another
// container's namespace is expressed as a /proc/<pid>/ns/<type> path;
// "host" modes remove the namespace entry from the spec entirely. When a
// private user namespace is active, any joined net/ipc/pid namespace must
// also share the peer's user namespace.
func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error {
userNS := false
// user
if c.HostConfig.UsernsMode.IsPrivate() {
uidMap := daemon.idMapping.UIDs()
if uidMap != nil {
userNS = true
ns := specs.LinuxNamespace{Type: "user"}
setNamespace(s, ns)
s.Linux.UIDMappings = specMapping(uidMap)
s.Linux.GIDMappings = specMapping(daemon.idMapping.GIDs())
}
}
// network
if !c.Config.NetworkDisabled {
ns := specs.LinuxNamespace{Type: "network"}
// NetworkMode may be "container:<id>" — join that container's netns.
parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
if parts[0] == "container" {
nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer())
if err != nil {
return err
}
ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID())
if userNS {
// to share a net namespace, they must also share a user namespace
nsUser := specs.LinuxNamespace{Type: "user"}
nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID())
setNamespace(s, nsUser)
}
} else if c.HostConfig.NetworkMode.IsHost() {
ns.Path = c.NetworkSettings.SandboxKey
}
setNamespace(s, ns)
}
// ipc
ipcMode := c.HostConfig.IpcMode
switch {
case ipcMode.IsContainer():
ns := specs.LinuxNamespace{Type: "ipc"}
ic, err := daemon.getIpcContainer(ipcMode.Container())
if err != nil {
return err
}
ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID())
setNamespace(s, ns)
if userNS {
// to share an IPC namespace, they must also share a user namespace
nsUser := specs.LinuxNamespace{Type: "user"}
nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID())
setNamespace(s, nsUser)
}
case ipcMode.IsHost():
oci.RemoveNamespace(s, specs.LinuxNamespaceType("ipc"))
case ipcMode.IsEmpty():
// A container was created by an older version of the daemon.
// The default behavior used to be what is now called "shareable".
fallthrough
case ipcMode.IsPrivate(), ipcMode.IsShareable(), ipcMode.IsNone():
ns := specs.LinuxNamespace{Type: "ipc"}
setNamespace(s, ns)
default:
return fmt.Errorf("Invalid IPC mode: %v", ipcMode)
}
// pid
if c.HostConfig.PidMode.IsContainer() {
ns := specs.LinuxNamespace{Type: "pid"}
pc, err := daemon.getPidContainer(c)
if err != nil {
return err
}
ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID())
setNamespace(s, ns)
if userNS {
// to share a PID namespace, they must also share a user namespace
nsUser := specs.LinuxNamespace{Type: "user"}
nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID())
setNamespace(s, nsUser)
}
} else if c.HostConfig.PidMode.IsHost() {
oci.RemoveNamespace(s, specs.LinuxNamespaceType("pid"))
} else {
ns := specs.LinuxNamespace{Type: "pid"}
setNamespace(s, ns)
}
// uts
if c.HostConfig.UTSMode.IsHost() {
oci.RemoveNamespace(s, specs.LinuxNamespaceType("uts"))
s.Hostname = ""
}
return nil
}
// specMapping converts idtools ID mappings into their OCI runtime spec
// (LinuxIDMapping) representation. Returns nil for an empty input.
func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
var ids []specs.LinuxIDMapping
for _, item := range s {
ids = append(ids, specs.LinuxIDMapping{
HostID: uint32(item.HostID),
ContainerID: uint32(item.ContainerID),
Size: uint32(item.Size),
})
}
return ids
}
// getSourceMount returns the mount point containing source (after
// resolving symlinks), together with that mount's optional fields from
// mountinfo (propagation markers such as "shared:N" or "master:N").
func getSourceMount(source string) (string, string, error) {
// Ensure any symlinks are resolved.
sourcePath, err := filepath.EvalSymlinks(source)
if err != nil {
return "", "", err
}
mi, err := mount.GetMounts(mount.ParentsFilter(sourcePath))
if err != nil {
return "", "", err
}
if len(mi) < 1 {
return "", "", fmt.Errorf("Can't find mount point of %s", source)
}
// find the longest mount point
// (ParentsFilter matches every ancestor mount; the longest mountpoint
// is the innermost one, i.e. the mount that actually contains source)
var idx, maxlen int
for i := range mi {
if len(mi[i].Mountpoint) > maxlen {
maxlen = len(mi[i].Mountpoint)
idx = i
}
}
return mi[idx].Mountpoint, mi[idx].Optional, nil
}
const (
// Prefixes of the optional-field entries in mountinfo that mark a
// mount as shared ("shared:N") or as a slave of a peer group ("master:N").
sharedPropagationOption = "shared:"
slavePropagationOption = "master:"
)
// hasMountinfoOption reports whether any entry of the space-separated
// mountinfo option string opts begins with one of the given prefixes.
func hasMountinfoOption(opts string, vals ...string) bool {
	options := strings.Split(opts, " ")
	for _, val := range vals {
		for _, opt := range options {
			if strings.HasPrefix(opt, val) {
				return true
			}
		}
	}
	return false
}
// ensureShared returns an error unless the mount containing path has
// shared propagation (a "shared:N" optional field in mountinfo).
func ensureShared(path string) error {
	mountPoint, opts, err := getSourceMount(path)
	if err != nil {
		return err
	}
	if hasMountinfoOption(opts, sharedPropagationOption) {
		return nil
	}
	return errors.Errorf("path %s is mounted on %s but it is not a shared mount", path, mountPoint)
}
// ensureSharedOrSlave returns an error unless the mount containing path
// has either shared or slave propagation.
func ensureSharedOrSlave(path string) error {
	mountPoint, opts, err := getSourceMount(path)
	if err != nil {
		return err
	}
	if hasMountinfoOption(opts, sharedPropagationOption, slavePropagationOption) {
		return nil
	}
	return errors.Errorf("path %s is mounted on %s but it is not a shared or slave mount", path, mountPoint)
}
// getUnprivilegedMountFlags returns the mount flags set on the mount
// containing path that the kernel locks with CL_UNPRIVILEGED. These must
// be preserved when bind-mounting "with options" under user namespaces,
// since user-namespace mounts may not clear locked flags.
func getUnprivilegedMountFlags(path string) ([]string, error) {
	var st unix.Statfs_t
	if err := unix.Statfs(path, &st); err != nil {
		return nil, err
	}
	// The set of keys come from https://github.com/torvalds/linux/blob/v4.13/fs/namespace.c#L1034-L1048.
	lockedFlags := map[uint64]string{
		unix.MS_RDONLY:     "ro",
		unix.MS_NODEV:      "nodev",
		unix.MS_NOEXEC:     "noexec",
		unix.MS_NOSUID:     "nosuid",
		unix.MS_NOATIME:    "noatime",
		unix.MS_RELATIME:   "relatime",
		unix.MS_NODIRATIME: "nodiratime",
	}
	var out []string
	for bit, name := range lockedFlags {
		if uint64(st.Flags)&bit == bit {
			out = append(out, name)
		}
	}
	return out, nil
}
var (
// mountPropagationMap maps the propagation-mode names used in mount
// specs to the mount package's flag constants;
// mountPropagationReverseMap is its inverse.
mountPropagationMap = map[string]int{
"private": mount.PRIVATE,
"rprivate": mount.RPRIVATE,
"shared": mount.SHARED,
"rshared": mount.RSHARED,
"slave": mount.SLAVE,
"rslave": mount.RSLAVE,
}
mountPropagationReverseMap = map[int]string{
mount.PRIVATE: "private",
mount.RPRIVATE: "rprivate",
mount.SHARED: "shared",
mount.RSHARED: "rshared",
mount.SLAVE: "slave",
mount.RSLAVE: "rslave",
}
)
// inSlice reports whether s is present in slice. The comparison is
// case sensitive.
func inSlice(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
// setMounts finalizes the mount table of the spec: it merges the default
// spec mounts with the user-supplied ones (user mounts win), expands
// tmpfs options, validates bind-mount propagation and adjusts
// RootfsPropagation accordingly, applies read-only and privileged-mode
// adjustments, and clears the read-only flag on cgroup mounts when user
// namespaces are in use.
func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error {
userMounts := make(map[string]struct{})
for _, m := range mounts {
userMounts[m.Destination] = struct{}{}
}
// Copy all mounts from spec to defaultMounts, except for
// - mounts overridden by a user supplied mount;
// - all mounts under /dev if a user supplied /dev is present;
// - /dev/shm, in case IpcMode is none.
// While at it, also
// - set size for /dev/shm from shmsize.
defaultMounts := s.Mounts[:0]
_, mountDev := userMounts["/dev"]
for _, m := range s.Mounts {
if _, ok := userMounts[m.Destination]; ok {
// filter out mount overridden by a user supplied mount
continue
}
if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
// filter out everything under /dev if /dev is user-mounted
continue
}
if m.Destination == "/dev/shm" {
if c.HostConfig.IpcMode.IsNone() {
// filter out /dev/shm for "none" IpcMode
continue
}
// set size for /dev/shm mount from spec
sizeOpt := "size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10)
m.Options = append(m.Options, sizeOpt)
}
defaultMounts = append(defaultMounts, m)
}
s.Mounts = defaultMounts
// Now append the user-supplied mounts: tmpfs mounts get merged options,
// everything else becomes a bind mount with propagation handling below.
for _, m := range mounts {
if m.Source == "tmpfs" {
data := m.Data
parser := volumemounts.NewParser("linux")
options := []string{"noexec", "nosuid", "nodev", string(parser.DefaultPropagationMode())}
if data != "" {
options = append(options, strings.Split(data, ",")...)
}
merged, err := mount.MergeTmpfsOptions(options)
if err != nil {
return err
}
s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged})
continue
}
mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"}
// Determine property of RootPropagation based on volume
// properties. If a volume is shared, then keep root propagation
// shared. This should work for slave and private volumes too.
//
// For slave volumes, it can be either [r]shared/[r]slave.
//
// For private volumes any root propagation value should work.
pFlag := mountPropagationMap[m.Propagation]
switch pFlag {
case mount.SHARED, mount.RSHARED:
if err := ensureShared(m.Source); err != nil {
return err
}
rootpg := mountPropagationMap[s.Linux.RootfsPropagation]
if rootpg != mount.SHARED && rootpg != mount.RSHARED {
s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED]
}
case mount.SLAVE, mount.RSLAVE:
var fallback bool
if err := ensureSharedOrSlave(m.Source); err != nil {
// For backwards compatibility purposes, treat mounts from the daemon root
// as special since we automatically add rslave propagation to these mounts
// when the user did not set anything, so we should fallback to the old
// behavior which is to use private propagation which is normally the
// default.
if !strings.HasPrefix(m.Source, daemon.root) && !strings.HasPrefix(daemon.root, m.Source) {
return err
}
cm, ok := c.MountPoints[m.Destination]
if !ok {
return err
}
if cm.Spec.BindOptions != nil && cm.Spec.BindOptions.Propagation != "" {
// This means the user explicitly set a propagation, do not fallback in that case.
return err
}
fallback = true
logrus.WithField("container", c.ID).WithField("source", m.Source).Warn("Falling back to default propagation for bind source in daemon root")
}
if !fallback {
rootpg := mountPropagationMap[s.Linux.RootfsPropagation]
if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE {
s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE]
}
}
}
bindMode := "rbind"
if m.NonRecursive {
bindMode = "bind"
}
opts := []string{bindMode}
if !m.Writable {
opts = append(opts, "ro")
}
if pFlag != 0 {
opts = append(opts, mountPropagationReverseMap[pFlag])
}
// If we are using user namespaces, then we must make sure that we
// don't drop any of the CL_UNPRIVILEGED "locked" flags of the source
// "mount" when we bind-mount. The reason for this is that at the point
// when runc sets up the root filesystem, it is already inside a user
// namespace, and thus cannot change any flags that are locked.
if daemon.configStore.RemappedRoot != "" {
unprivOpts, err := getUnprivilegedMountFlags(m.Source)
if err != nil {
return err
}
opts = append(opts, unprivOpts...)
}
mt.Options = opts
s.Mounts = append(s.Mounts, mt)
}
// For a read-only rootfs, force "ro" onto every default mount except
// the pseudo-filesystems and user-supplied mounts.
if s.Root.Readonly {
for i, m := range s.Mounts {
switch m.Destination {
case "/proc", "/dev/pts", "/dev/shm", "/dev/mqueue", "/dev":
continue
}
if _, ok := userMounts[m.Destination]; !ok {
if !inSlice(m.Options, "ro") {
s.Mounts[i].Options = append(s.Mounts[i].Options, "ro")
}
}
}
}
if c.HostConfig.Privileged {
// clear readonly for /sys
for i := range s.Mounts {
if s.Mounts[i].Destination == "/sys" {
clearReadOnly(&s.Mounts[i])
}
}
s.Linux.ReadonlyPaths = nil
s.Linux.MaskedPaths = nil
}
// TODO: until a kernel/mount solution exists for handling remount in a user namespace,
// we must clear the readonly flag for the cgroups mount (@mrunalp concurs)
if uidMap := daemon.idMapping.UIDs(); uidMap != nil || c.HostConfig.Privileged {
for i, m := range s.Mounts {
if m.Type == "cgroup" {
clearReadOnly(&s.Mounts[i])
}
}
}
return nil
}
// populateCommonSpec fills the basic parts of the spec: rootfs path and
// read-only flag, process args/env/cwd/tty, the optional docker-init
// entrypoint wrapper, hostname, and the kernel.domainname sysctl.
func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
if c.BaseFS == nil {
return errors.New("populateCommonSpec: BaseFS of container " + c.ID + " is unexpectedly nil")
}
linkedEnv, err := daemon.setupLinkedContainers(c)
if err != nil {
return err
}
s.Root = &specs.Root{
Path: c.BaseFS.Path(),
Readonly: c.HostConfig.ReadonlyRootfs,
}
if err := c.SetupWorkingDirectory(daemon.idMapping.RootPair()); err != nil {
return err
}
cwd := c.Config.WorkingDir
if len(cwd) == 0 {
cwd = "/"
}
s.Process.Args = append([]string{c.Path}, c.Args...)
// only add the custom init if it is specified and the container is running in its
// own private pid namespace. It does not make sense to add if it is running in the
// host namespace or another container's pid namespace where we already have an init
if c.HostConfig.PidMode.IsPrivate() {
if (c.HostConfig.Init != nil && *c.HostConfig.Init) ||
(c.HostConfig.Init == nil && daemon.configStore.Init) {
s.Process.Args = append([]string{inContainerInitPath, "--", c.Path}, c.Args...)
path := daemon.configStore.InitPath
if path == "" {
path, err = exec.LookPath(daemonconfig.DefaultInitBinary)
if err != nil {
return err
}
}
// Bind-mount the init binary read-only into the container.
s.Mounts = append(s.Mounts, specs.Mount{
Destination: inContainerInitPath,
Type: "bind",
Source: path,
Options: []string{"bind", "ro"},
})
}
}
s.Process.Cwd = cwd
s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
s.Process.Terminal = c.Config.Tty
s.Hostname = c.Config.Hostname
// There isn't a field in the OCI for the NIS domainname, but luckily there
// is a sysctl which has an identical effect to setdomainname(2) so there's
// no explicit need for runtime support.
s.Linux.Sysctl = make(map[string]string)
if c.Config.Domainname != "" {
s.Linux.Sysctl["kernel.domainname"] = c.Config.Domainname
}
return nil
}
// createSpec assembles the complete OCI runtime spec for the container:
// common fields, cgroups path, resources, sysctls, devices, rlimits,
// user, namespaces, capabilities, seccomp, all mounts, the libnetwork
// prestart hook, and AppArmor/SELinux process labels. The deferred
// cleanup removes the secret dir if any later step fails.
func (daemon *Daemon) createSpec(c *container.Container) (retSpec *specs.Spec, err error) {
s := oci.DefaultSpec()
if err := daemon.populateCommonSpec(&s, c); err != nil {
return nil, err
}
// Resolve the cgroup parent: explicit container setting wins over the
// daemon-wide setting, which wins over the built-in default.
var cgroupsPath string
scopePrefix := "docker"
parent := "/docker"
useSystemd := UsingSystemd(daemon.configStore)
if useSystemd {
parent = "system.slice"
}
if c.HostConfig.CgroupParent != "" {
parent = c.HostConfig.CgroupParent
} else if daemon.configStore.CgroupParent != "" {
parent = daemon.configStore.CgroupParent
}
if useSystemd {
cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID
logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath)
} else {
cgroupsPath = filepath.Join(parent, c.ID)
}
s.Linux.CgroupsPath = cgroupsPath
if err := setResources(&s, c.HostConfig.Resources); err != nil {
return nil, fmt.Errorf("linux runtime spec resources: %v", err)
}
// We merge the sysctls injected above with the HostConfig (latter takes
// precedence for backwards-compatibility reasons).
for k, v := range c.HostConfig.Sysctls {
s.Linux.Sysctl[k] = v
}
p := s.Linux.CgroupsPath
if useSystemd {
initPath, err := cgroups.GetInitCgroup("cpu")
if err != nil {
return nil, err
}
_, err = cgroups.GetOwnCgroup("cpu")
if err != nil {
return nil, err
}
p = filepath.Join(initPath, s.Linux.CgroupsPath)
}
// Clean path to guard against things like ../../../BAD
parentPath := filepath.Dir(p)
if !filepath.IsAbs(parentPath) {
parentPath = filepath.Clean("/" + parentPath)
}
if err := daemon.initCgroupsPath(parentPath); err != nil {
return nil, fmt.Errorf("linux init cgroups path: %v", err)
}
if err := setDevices(&s, c); err != nil {
return nil, fmt.Errorf("linux runtime spec devices: %v", err)
}
if err := daemon.setRlimits(&s, c); err != nil {
return nil, fmt.Errorf("linux runtime spec rlimits: %v", err)
}
if err := setUser(&s, c); err != nil {
return nil, fmt.Errorf("linux spec user: %v", err)
}
if err := setNamespaces(daemon, &s, c); err != nil {
return nil, fmt.Errorf("linux spec namespaces: %v", err)
}
if err := setCapabilities(&s, c); err != nil {
return nil, fmt.Errorf("linux spec capabilities: %v", err)
}
if err := setSeccomp(daemon, &s, c); err != nil {
return nil, fmt.Errorf("linux seccomp: %v", err)
}
if err := daemon.setupContainerMountsRoot(c); err != nil {
return nil, err
}
if err := daemon.setupIpcDirs(c); err != nil {
return nil, err
}
defer func() {
if err != nil {
daemon.cleanupSecretDir(c)
}
}()
if err := daemon.setupSecretDir(c); err != nil {
return nil, err
}
ms, err := daemon.setupMounts(c)
if err != nil {
return nil, err
}
if !c.HostConfig.IpcMode.IsPrivate() && !c.HostConfig.IpcMode.IsEmpty() {
ms = append(ms, c.IpcMounts()...)
}
tmpfsMounts, err := c.TmpfsMounts()
if err != nil {
return nil, err
}
ms = append(ms, tmpfsMounts...)
secretMounts, err := c.SecretMounts()
if err != nil {
return nil, err
}
ms = append(ms, secretMounts...)
sort.Sort(mounts(ms))
if err := setMounts(daemon, &s, c, ms); err != nil {
return nil, fmt.Errorf("linux mounts: %v", err)
}
// If the container gets its own network namespace, register the
// libnetwork prestart hook (re-executes the daemon binary as setkey).
for _, ns := range s.Linux.Namespaces {
if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled {
target := filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")
s.Hooks = &specs.Hooks{
Prestart: []specs.Hook{{
Path: target,
Args: []string{"libnetwork-setkey", "-exec-root=" + daemon.configStore.GetExecRoot(), c.ID, daemon.netController.ID()},
}},
}
}
}
if apparmor.IsEnabled() {
var appArmorProfile string
if c.AppArmorProfile != "" {
appArmorProfile = c.AppArmorProfile
} else if c.HostConfig.Privileged {
appArmorProfile = "unconfined"
} else {
appArmorProfile = "docker-default"
}
if appArmorProfile == "docker-default" {
// Unattended upgrades and other fun services can unload AppArmor
// profiles inadvertently. Since we cannot store our profile in
// /etc/apparmor.d, nor can we practically add other ways of
// telling the system to keep our profile loaded, in order to make
// sure that we keep the default profile enabled we dynamically
// reload it if necessary.
if err := ensureDefaultAppArmorProfile(); err != nil {
return nil, err
}
}
s.Process.ApparmorProfile = appArmorProfile
}
s.Process.SelinuxLabel = c.GetProcessLabel()
s.Process.NoNewPrivileges = c.NoNewPrivileges
s.Process.OOMScoreAdj = &c.HostConfig.OomScoreAdj
s.Linux.MountLabel = c.MountLabel
// Set the masked and readonly paths with regard to the host config options if they are set.
if c.HostConfig.MaskedPaths != nil {
s.Linux.MaskedPaths = c.HostConfig.MaskedPaths
}
if c.HostConfig.ReadonlyPaths != nil {
s.Linux.ReadonlyPaths = c.HostConfig.ReadonlyPaths
}
return &s, nil
}
// clearReadOnly removes the "ro" flag from a mount's option list,
// keeping every other option in its original order.
func clearReadOnly(m *specs.Mount) {
	var kept []string
	for _, option := range m.Options {
		if option == "ro" {
			continue
		}
		kept = append(kept, option)
	}
	m.Options = kept
}
// mergeUlimits merges the daemon's default ulimits into the given
// HostConfig, keeping any limit the container already defines for the
// same name, and updates c.Ulimits in place.
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
	// Record which limit names the container already sets.
	seen := make(map[string]struct{}, len(c.Ulimits))
	for _, ul := range c.Ulimits {
		seen[ul.Name] = struct{}{}
	}
	merged := c.Ulimits
	for name, ul := range daemon.configStore.Ulimits {
		if _, ok := seen[name]; !ok {
			merged = append(merged, ul)
		}
	}
	c.Ulimits = merged
}
| dmcgowan/docker | daemon/oci_linux.go | GO | apache-2.0 | 25,547 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { by, element } from 'protractor'
import { BasePage } from './BasePage.po';
import { SideNavigationPage } from './SideNavigationPage.po';
import { ParametersPage } from './ParametersPage.po';
export class PhysLocationsPage extends BasePage {
private btnCreateNewPhysLocation = element(by.name('createPhysLocationButton'));
private txtName = element(by.name('name'));
private txtShortName = element(by.name('shortName'));
private txtAddress = element(by.name('address'));
private txtCity = element(by.name('city'));
private txtState = element(by.name('state'));
private txtZip = element(by.name('zip'));
private txtPoc = element(by.name('poc'));
private txtPhone = element(by.name('phone'));
private txtEmail = element(by.name('email'));
private txtRegion = element(by.name('region'));
private txtComments = element(by.name('comments'));
private txtSearch = element(by.id('physLocationsTable_filter')).element(by.css('label input'));
private mnuPhysLocationsTable = element(by.id('physLocationsTable'));
private btnDelete = element(by.buttonText('Delete'));
private txtConfirmName = element(by.name('confirmWithNameInput'));
private config = require('../config');
private randomize = this.config.randomize;
async OpenPhysLocationPage() {
let snp = new SideNavigationPage();
await snp.NavigateToPhysLocation();
}
async OpenConfigureMenu() {
let snp = new SideNavigationPage();
await snp.ClickTopologyMenu();
}
async CreatePhysLocation(physlocation) {
let result = false;
let basePage = new BasePage();
let snp = new SideNavigationPage();
await snp.NavigateToPhysLocation();
await this.btnCreateNewPhysLocation.click();
await this.txtName.sendKeys(physlocation.Name + this.randomize);
await this.txtShortName.sendKeys(physlocation.ShortName);
await this.txtAddress.sendKeys(physlocation.Address);
await this.txtCity.sendKeys(physlocation.City);
await this.txtState.sendKeys(physlocation.State);
await this.txtZip.sendKeys(physlocation.Zip);
await this.txtPoc.sendKeys(physlocation.Poc);
await this.txtPhone.sendKeys(physlocation.Phone);
await this.txtEmail.sendKeys(physlocation.Email);
await this.txtRegion.sendKeys(physlocation.Region + this.randomize);
await this.txtComments.sendKeys(physlocation.Comments);
await basePage.ClickCreate();
result = await basePage.GetOutputMessage().then(function (value) {
if (physlocation.validationMessage == value) {
return true;
} else {
return false;
}
})
return result;
}
async SearchPhysLocation(physlocationName) {
let result = false;
let snp = new SideNavigationPage();
let name = physlocationName + this.randomize;
await snp.NavigateToPhysLocation();
await this.txtSearch.clear();
await this.txtSearch.sendKeys(name);
await element.all(by.repeater('pl in ::physLocations')).filter(function (row) {
return row.element(by.name('name')).getText().then(function (val) {
return val === name;
});
}).first().click();
}
async UpdatePhysLocation(physlocation) {
let result = false;
let basePage = new BasePage();
switch (physlocation.description) {
case "update physlocation region":
await this.txtRegion.sendKeys(physlocation.Region + this.randomize);
await basePage.ClickUpdate();
break;
default:
result = undefined;
}
if (result = !undefined) {
result = await basePage.GetOutputMessage().then(function (value) {
if (physlocation.validationMessage == value) {
return true;
} else {
return false;
}
})
}
return result;
}
async DeletePhysLocation(physlocation) {
    // Confirm the delete dialog by re-typing the randomized name, submit,
    // and report whether the output message matches the expected one.
    const basePage = new BasePage();
    await this.btnDelete.click();
    await this.txtConfirmName.sendKeys(physlocation.Name + this.randomize);
    await basePage.ClickDeletePermanently();
    const message = await basePage.GetOutputMessage();
    return physlocation.validationMessage == message;
}
} | hbeatty/incubator-trafficcontrol | traffic_portal/test/integration/PageObjects/PhysLocationsPage.po.ts | TypeScript | apache-2.0 | 5,036 |
package org.thymoljs.thymol.test.json;
import com.cedarsoftware.util.io.JsonReader;
import com.cedarsoftware.util.io.JsonWriter;
public class JDEREGJsonCodec implements JSONCodec {
	// JSON round-tripping backed by https://github.com/jdereg/json-io

	public JDEREGJsonCodec() {
	}

	/** Serializes {@code source} into a json-io JSON string. */
	@Override
	public String encode( Object source ) {
		return JsonWriter.objectToJson( source );
	}

	/** Reconstructs a Java object graph from a json-io JSON string. */
	@Override
	public Object decode( String source ) {
		return JsonReader.jsonToJava( source );
	}
}
| thymol/thymol.js | src/main/java/org/thymoljs/thymol/test/json/JDEREGJsonCodec.java | Java | apache-2.0 | 533 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using Entities;
namespace HRM.HR_Managment.Employee
{
public partial class List : System.Web.UI.Page
{
    protected void Page_Load(object sender, EventArgs e)
    {
        // Populate the status filter only on the initial GET; on postbacks
        // the drop-down keeps its contents via view state.
        if (IsPostBack)
        {
            return;
        }
        GUIHelper.BindEnum2DropDownList(EmployeeStatusDropDownList, typeof(StatusEnum), true);
    }
}
} | GramozKrasniqi/HRMS | HRM/HR-Managment/Employee/List.aspx.cs | C# | apache-2.0 | 503 |
using System.Web;
using System.Web.Optimization;
namespace QueueBrowser
{
public class BundleConfig
{
    /// <summary>
    /// Registers the application's CSS and JavaScript bundles with the
    /// ASP.NET Web Optimization framework.
    /// </summary>
    public static void RegisterBundles(BundleCollection bundles)
    {
        // Site-wide stylesheet bundle: Bootstrap and widget CSS first,
        // local overrides last (site.css wins on conflicts).
        bundles.Add(new StyleBundle("~/content/css")
            .Include(
                "~/Content/bootstrap.css",
                "~/Content/bootstrap-theme.css",
                "~/Content/angular-toastr.css",
                "~/Content/bootstrap-dialog.css",
                "~/Content/alert.css",
                "~/Content/badge.css",
                "~/Content/metric.css",
                "~/Content/site.css"
            )
        );
        // Modernizr stays in its own bundle so it can load early.
        bundles.Add(new ScriptBundle("~/bundles/modernizr")
            .Include(
                "~/Scripts/modernizr-{version}.js"
            )
        );
        // Third-party scripts: jQuery, Angular 1.x, Bootstrap, SignalR, etc.
        bundles.Add(new ScriptBundle("~/bundles/scripts")
            .Include(
                "~/Scripts/jquery-{version}.js",
                /* angular */
                "~/Scripts/angular.js",
                "~/Scripts/angular-animate.js",
                "~/Scripts/angular-messages.js",
                "~/Scripts/angular-route.js",
                "~/Scripts/angular-sanitize.js",
                /* bootstrap */
                "~/Scripts/bootstrap.js",
                /* other */
                "~/Scripts/lodash.js",
                "~/Scripts/jquery.signalR-{version}.js",
                "~/Scripts/angular-signalr-hub.js",
                "~/Scripts/moment.js",
                "~/Scripts/angular-moment.js",
                "~/Scripts/bootstrap-dialog.js",
                "~/Scripts/angular-toastr.tpls.js",
                "~/Scripts/angular-ui/ui-bootstrap-tpls.js"
            )
        );
        // Application code, grouped by feature folder.
        bundles.Add(new ScriptBundle("~/bundles/app")
            .Include("~/app/app.js")
            .IncludeDirectory("~/app/models/", "*.js")
            .IncludeDirectory("~/app/services/", "*.js")
            .IncludeDirectory("~/app/queue/", "*.js")
            .IncludeDirectory("~/app/logging/", "*.js")
        );
#if !DEBUG
        // Minify and concatenate bundles outside of debug builds.
        BundleTable.EnableOptimizations = true;
#endif
    }
}
}
| loresoft/MongoDB.Messaging | Samples/QueueBrowser/App_Start/BundleConfig.cs | C# | apache-2.0 | 2,337 |
/*
* Copyright 2016 Girish Kamath
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rhoadster91.android.siesta.api.capability;
import com.rhoadster91.android.siesta.request.PutRequest;
/**
 * Capability mixin for API resources that support HTTP PUT.
 *
 * @param <T> the response payload type produced by a PUT call
 */
public interface Puttable<T> {
    /** Builds a new, unsent PUT request for this resource. */
    PutRequest<T> newPutRequest();
    /** Decides whether the given response code and body represent a successful PUT. */
    boolean isPutSuccess(int responseCode, T response);
}
| rhoadster91/android-siesta | library/src/main/java/com/rhoadster91/android/siesta/api/capability/Puttable.java | Java | apache-2.0 | 837 |
var _ = require('underscore');
var vcr = require('nock-vcr-recorder-mocha');
var Sdk = require('./../../../lib/clc-sdk.js');
var compute = new Sdk('cloud_user', 'cloud_user_password').computeServices();
var assert = require('assert');
var ServerBuilder = require('./../server-builder.js');
vcr.describe('Create server operation [UNIT]', function () {
var DataCenter = compute.DataCenter;
var Server = compute.Server;
var Group = compute.Group;
var builder = new ServerBuilder(compute);
// Smoke test: create a CentOS VM carrying one custom field, then delete it.
// Network traffic is replayed from a nock-vcr cassette.
it('Should create new server', function (done) {
    this.timeout(10000);
    builder.createCentOsVm({
        customFields: [
            {
                name: "Type",
                value: 0
            }
        ]
    })
    .then(builder.deleteServer(done));
});
// Creates a hyperscale server in CA_TORONTO_2 with an anti-affinity policy,
// then verifies the server type/storage and the policy linkage before
// cleaning up. Traffic is replayed from a nock-vcr cassette.
it('Should create new hyperscale server with anti-affinity policy', function (done) {
    this.timeout(10000);
    builder.createCentOsVm({
        description: "My hyperscale server",
        group: {
            dataCenter: DataCenter.CA_TORONTO_2,
            name: Group.DEFAULT
        },
        template: {
            dataCenter: DataCenter.CA_TORONTO_2,
            operatingSystem: {
                family: compute.OsFamily.CENTOS,
                version: "6",
                architecture: compute.Machine.Architecture.X86_64
            }
        },
        machine: {
            cpu: 1,
            memoryGB: 1,
            disks: [
                { size: 2 },
                { path: "/data", size: 4 }
            ],
            // picks the existing anti-affinity policy whose name contains "policy"
            antiAffinity: {
                nameContains: 'policy'
            }
        },
        type: Server.HYPERSCALE
    })
    .then(compute.servers().findSingle)
    .then(assertThatServerIsHyperscale)
    .then(assertThatAntiAffinityPolicyIsSpecified)
    .then(builder.deleteServer(done));
});
// Asserts the server was provisioned as hyperscale (both the server type
// and its storage type); returns the server so the promise chain continues.
function assertThatServerIsHyperscale(server) {
    assert.equal(server.type, Server.HYPERSCALE);
    assert.equal(server.storageType, Server.StorageType.HYPERSCALE);
    return server;
}
// Looks up the anti-affinity policy in the server's data center and asserts
// that the policy's links contain a "server" link pointing at this server.
// Returns the server so the promise chain continues.
function assertThatAntiAffinityPolicyIsSpecified(server) {
    return compute.policies().antiAffinity()
        .findSingle({
            dataCenterId: server.locationId.toLowerCase(),
            nameContains: 'policy'
        })
        .then(function(policy) {
            var serverLink = _.findWhere(policy.links, {rel: 'server', id: server.id});
            assert(!_.isUndefined(serverLink));
            return server;
        });
}
}); | CenturyLinkCloud/clc-node-sdk | test/compute-services/servers/create-server-test.js | JavaScript | apache-2.0 | 2,709 |
package net.minecraft.src;
import net.minecraft.entity.player.EntityPlayer;
/**
*
* @author Gregory
*/
/**
 * Minimal stub of Minecraft's NetServerHandler; only exposes the player
 * entity this handler serves.
 *
 * @author Gregory
 */
public class NetServerHandler {
    // NOTE(review): public mutable field mirrors the decompiled original.
    public EntityPlayer playerEntity;
}
| BeyondMinecraft/AbacusCommonsLib | src/main/java/net/minecraft/src/NetServerHandler.java | Java | apache-2.0 | 178 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.io.sensors.rotary_encoder import base_rotary_encoder
from calvin.runtime.south.plugins.async import async
from calvin.runtime.south.plugins.io.gpio import gpiopin
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class RotaryEncoder(base_rotary_encoder.RotaryEncoderBase):
"""
KY040 Rotary Encoder
"""
def __init__(self, node, turn_callback, switch_callback):
super(RotaryEncoder, self).__init__(node, turn_callback, switch_callback)
self._running = False
self._node = node
self._turn_callback = turn_callback
self._switch_callback = switch_callback
config = self._node.attributes.get_private("/hardware/ky040_rotary_encoder")
clk_pin = config.get('clk_pin', None)
dt_pin = config.get('dt_pin', None)
sw_pin = config.get('sw_pin', None)
self._clk_pin = gpiopin.GPIOPin(self._knob, clk_pin, "i", "u")
self._dt_pin = gpiopin.GPIOPin(None, dt_pin, "i", None)
self._sw_pin = gpiopin.GPIOPin(self._switch, sw_pin, "i", "u")
def start(self, frequency=0.5):
try :
self._clk_pin.detect_edge("f")
self._sw_pin.detect_edge("f")
self._running = True
# gpio.add_event_detect(self.echo_pin,
# gpio.FALLING,
# callback=self._echo_callback)
except Exception as e:
_log.error("Could not setup event detect: %r" % (e, ))
def cb_error(self, *args, **kwargs):
_log.error("%r: %r" % (args, kwargs))
def _knob(self):
if self._clk_pin.get_state():
if self._dt_pin.get_state() :
async.call_from_thread(self._turn_callback, -1)
else :
async.call_from_thread(self._turn_callback, 1)
def _switch(self):
async.call_from_thread(self._switch_callback)
def stop(self):
if self._running :
if self.retry and self.retry.iactive() :
self.retry.cancel()
try:
self._sw_pin.stop_detect()
self._dt_pin.stop_detect()
self._clk_pin.stop_detect()
except Exception as e:
_log.warning("Could not remove event detect: %r" % (e,))
self._running = False
| les69/calvin-base | calvin/runtime/south/plugins/io/sensors/rotary_encoder/platform/ky040_rotary_impl/rotary_encoder.py | Python | apache-2.0 | 3,011 |
#!/usr/env python
# NOTE(review): shebang is likely a typo for #!/usr/bin/env python -- confirm
# before relying on direct execution.
import sys
# Collect every option name declared in the gengetopt spec.
options = []
with open("src/zopt.ggo.in") as fd:
    for l in fd:
        if l.startswith("option "):
            # gengetopt lines look like: option "name" ... ; take the quoted name
            option = l.split()[1].lstrip('"').rstrip('"')
            options.append(option)
# Every declared option must be mentioned somewhere in the man-page source.
man = open('src/zmap.1.ronn').read()
failures = False
for option in options:
    if option not in man:
        failures = True
        sys.stderr.write("ZMap option missing from man file: %s\n" % option)
if failures:
| willscott/zmap | scripts/check_manfile.py | Python | apache-2.0 | 493 |
package com.elastisys.scale.cloudpool.commons.basepool;
import static com.google.common.base.Objects.equal;
import static java.lang.String.format;
import org.hamcrest.Description;
import org.hamcrest.Factory;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;
import com.elastisys.scale.cloudpool.api.types.ServiceState;
import com.elastisys.scale.cloudpool.commons.basepool.alerts.AlertTopics;
import com.elastisys.scale.commons.net.alerter.Alert;
public class IsSetServiceStateAlert extends TypeSafeMatcher<Alert> {
private final String machineId;
private final ServiceState serviceState;
    /**
     * @param machineId    id of the machine whose service state was set
     * @param serviceState the service state expected in the alert message
     */
    public IsSetServiceStateAlert(String machineId, ServiceState serviceState) {
        this.machineId = machineId;
        this.serviceState = serviceState;
    }
    /**
     * Matches alerts on the SERVICE_STATE topic whose message contains the
     * "Service state set to <state> for machine <id>" phrase.
     */
    @Override
    public boolean matchesSafely(Alert someAlert) {
        String messagePattern = format("Service state set to %s for machine %s", this.serviceState, this.machineId);
        return equal(AlertTopics.SERVICE_STATE.name(), someAlert.getTopic())
                && someAlert.getMessage().contains(messagePattern);
    }
@Override
public void describeTo(Description description) {
description.appendText(String.format("service state alert (%s, %s)", this.machineId, this.serviceState));
}
    /** Hamcrest factory: matcher for a set-service-state alert for the given machine/state. */
    @Factory
    public static <T> Matcher<Alert> isSetServiceStateAlert(String machineId, ServiceState serviceState) {
        return new IsSetServiceStateAlert(machineId, serviceState);
    }
} | Eeemil/scale.cloudpool | commons/src/test/java/com/elastisys/scale/cloudpool/commons/basepool/IsSetServiceStateAlert.java | Java | apache-2.0 | 1,505 |
package com.mageddo.config;
import org.junit.jupiter.api.extension.BeforeTestExecutionCallback;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.api.extension.TestInstancePostProcessor;
import org.junit.platform.commons.support.AnnotationSupport;
import io.micronaut.test.annotation.MicronautTest;
import static com.mageddo.config.ApplicationContextUtils.context;
public class DatabaseConfiguratorExtension implements TestInstancePostProcessor, BeforeTestExecutionCallback {

  /**
   * Runs once per test instance.
   * NOTE(review): the result of findAnnotation is discarded, so this method
   * currently has no observable effect -- confirm whether a check on the
   * MicronautTest annotation was intended here.
   */
  @Override
  public void postProcessTestInstance(Object o, ExtensionContext extensionContext) throws Exception {
    AnnotationSupport.findAnnotation(extensionContext.getRequiredTestClass(), MicronautTest.class);
  }

  /** Applies pending database migrations before each test executes. */
  @Override
  public void beforeTestExecution(ExtensionContext extensionContext) throws Exception {
    final DatabaseConfigurator databaseConfigurator = context().getBean(DatabaseConfigurator.class);
    databaseConfigurator.migrate();
  }
}
| mageddo/bookmark-notes | src/test/java/com/mageddo/config/DatabaseConfiguratorExtension.java | Java | apache-2.0 | 974 |
#coding=UTF-8
'''
Created on 2011-7-6
@author: Administrator
'''
from urlparse import urlparse
import cookielib
from pyquery.pyquery import PyQuery #@UnresolvedImport
import re
import datetime #@UnusedImport
import urllib2
from lxml import etree #@UnresolvedImport
from lxml.cssselect import CSSSelector #@UnresolvedImport
import simplejson as js #@UnusedImport @UnresolvedImport
from config import housetype, checkPath, makePath,fitment,toward,deposit
import threading
from BeautifulSoup import BeautifulSoup #@UnresolvedImport
import time
import gc
from jjrlog import msglogger, LinkLog
from common import postHost
homepath="e:\\home\\spider\\"
gc.enable()
class LinkCrawl(object):
    """Walks ganji.com listing pages for one city and listing kind, and
    feeds every not-yet-seen detail link to getContent().

    kind: "1" for-sale, "2" for-rent, "3" wanted-to-buy, "4" wanted-to-rent.
    upc: maximum number of prior posts before a poster is skipped as an agent.
    """

    def __init__(self,citycode="",kind="",upc="5",st="3"):
        cj = cookielib.MozillaCookieJar()
        self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
        self.header={
            "User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
        }
        self.upc=upc
        # wanted-ads older than a week are not collected
        self.endtime=str(datetime.date.today() -datetime.timedelta(days=7))
        self.clinks=[]
        self.pn=[]
        self.citycode=citycode
        self.baseUrl="http://%s.ganji.com"%self.citycode
        self.kind=kind
        if kind=="1":  # for sale (出售)
            self.urlpath="/fang5/a1u2%s/"
            self.folder="sell\\"
        elif kind=="2":  # for rent (出租)
            self.urlpath="/fang1/u2%s/"
            self.folder="rent\\"
        elif kind=="3":  # wanted to buy (求购)
            self.urlpath="/fang4/u2f0/a1%s/"
            self.folder="buy\\"
        elif kind=="4":  # wanted to rent (求租)
            self.urlpath="/fang2/u2f0/a1%s/"
            self.folder="req\\"

    def __getAllNeedLinks(self):
        """Page through listing results (32 rows per page) until the pager
        stops advancing or (for wanted-ads) listings get older than a week,
        dispatching each unseen link to getContent()."""
        cond=True
        idx=0
        checkit="0"
        while cond:
            url=self.baseUrl+self.urlpath%("f"+str(idx*32))
            try:
                req=urllib2.Request(url, None, self.header)
                p=self.br.open(req).read()
            except:
                continue
            else:
                # the pager's current-page marker; when it repeats we are done
                check=PyQuery(p)("ul.pageLink li a.c").text()
                if check==None or check==checkit:
                    cond=False
                    break
                else:
                    checkit=check
                links=PyQuery(p)("div.list dl")
                p=None
                for link in links:
                    lk=self.baseUrl+PyQuery(link)(" a.list_title").attr("href")
                    # wanted-ads carry a timestamp; stop once it is too old
                    if self.kind=="3" or self.kind=="4":
                        tm=PyQuery(link)("dd span.time").text()
                        if re.match('''\d{2}-\d{2}''', tm):
                            Y=int(time.strftime('%Y', time.localtime()))
                            tm="%s-%s"%(Y,tm.strip())
                            if tm<self.endtime:
                                cond=False
                                break
                        elif "分钟" in tm:
                            pass
                        elif "小时" in tm:
                            pass
                        else:
                            cond=False
                            break
                    # only fetch links we have not stored on disk yet
                    if not checkPath(homepath,self.folder,lk):
                        LinkLog.info("%s|%s"%(self.kind,lk))
                        try:
                            getContent(lk,self.citycode,self.kind,self.upc)
                        except Exception,e:print "ganji getContent Exception %s"%e
                idx=idx+1

    def runme(self):
        # entry point: collect and process all links for this city/kind
        self.__getAllNeedLinks()
class ContentCrawl(object):
def __init__(self,links,citycode,kind,upc):
    """Prepare an HTTP opener, the per-kind storage folder and all the
    regexes used to pull fields out of ganji.com detail pages.

    kind: "1" sell, "2" rent, "3" buy (wanted), other: rent (wanted).
    """
    cj = cookielib.MozillaCookieJar()
    self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
    self.pdb={}
    self.header={
        "User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
    }
    self.urls=links
    self.kind=kind
    # fd accumulates the parsed fields of the current listing
    self.fd={}
    self.citycode=citycode
    self.upc=upc
    if kind=="1":
        self.folder="sell\\"
    elif kind=="2":
        self.folder="rent\\"
    elif kind=="3":
        self.folder="buy\\"
    else:
        self.folder="req\\"
    # regexes applied to the raw page; some fields are embedded in JS
    self.xiaoqu_regex="xiaoqu : '(.*?)',"
    self.address_regex="address : '(.*?)',"
    self.house_room_regex="(\d+)室"
    self.house_hall_regex="(\d+)厅"
    self.house_toilet_regex="(\d+)卫"
    self.house_desc_regex="房屋概况</p>(.*?)</p>"
    self.house_floor_regex="<li>楼层: 第(\d+)层/总(\d+)层</li>"
    self.house_totalarea_regex="<li>面积: (\d+) ㎡</li>"
    self.house_totalarea_regex_qiu="(\d+)㎡"
    self.house_type_regex3="<li>户型: (.*)</li>"
    self.house_toward_regex="<li>朝向: (.*)</li>"
    self.house_type_regex="<li>类型: (.*)</li>"
    self.cityarea_regex="<li>区域:([\s\S]*?)</li>"
    self.house_age_regex="<li>房龄: (\d+) 年</li>"
    self.house_fitment_regex="<li>装修: (.*)</li>"
    self.house_support_regex="<li>配置: (.*) </li>"
    self.house_price_regex="<li>售价: <span>(.*)</span>.*</li>"
    self.house_price_regex_2="<li>租金: <span>(.*)</span>.*</li>"
    self.borough_name_regex="<li>小区:(.*)</li>"
    self.house_deposit_regex="<li>租金: (.*)</li>"
    self.house_price_regex_zu = "<li>期望租金: (.*)</li>"
    self.borough_name_regex_reg = "<li>期望小区: (.*)</li>"
    self.house_addr_regex_reg = "<li>小区地址:(.*)</li>"
    self.house_price_regex_gou = "<li>期望售价: (.*)</li>"
def __addText(self, tag, no_tail=False):
    """Recursively concatenate the text content of an lxml element.

    Child subtrees always contribute their tails; this element's own
    tail is only appended when no_tail is false.
    """
    parts = []
    if tag.text:
        parts.append(tag.text)
    parts.extend(self.__addText(child) for child in tag.getchildren())
    if tag.tail and not no_tail:
        parts.append(tag.tail)
    return "".join(parts)
def getText(self, html):
    """Join the stripped text of each element in html with single spaces,
    skipping elements that are empty after stripping."""
    chunks = (self.__addText(tag, no_tail=True) for tag in html)
    stripped = [chunk.strip() for chunk in chunks]
    return ' '.join(chunk for chunk in stripped if chunk)
def mayGetIt(self,page):
    """Return True when this poster should be skipped: their posting-history
    page lists more than self.upc phone entries (likely an agent).

    Any error while fetching the history page also returns True (skip)."""
    try:
        href=PyQuery(page)("a.userHistory").attr("href")
        if href==None:
            return False
        href="http://%s.ganji.com%s"%(self.citycode,href)
        resp = urllib2.urlopen(urllib2.Request(href, None, self.header)).read()
        trs=PyQuery(resp)("table.tel_list tr")
    except:
        return True
    # first row is the table header, hence len(trs)-1 data rows
    if len(trs)-1>int(self.upc):
        return True
    else:
        return False
def sell(self,url):
    """Parse a ganji.com for-sale detail page into self.fd.

    Bails out early (clearing or leaving self.fd) when the poster looks
    like an agent, the listing is not a private one, or there is no
    contact phone number.
    """
    request = urllib2.Request(url, None, self.header)
    response = urllib2.urlopen(request).read()
    if self.mayGetIt(response):
        self.fd={}
        return
    tree = etree.HTML(response)
    soup =BeautifulSoup(response)
    self.fd['house_flag'] = 1
    self.fd['belong']=0
    detail_mer = soup.find('div',{'class':'detail_mer'})
    # skip listings not marked as private (个人房源)
    if u"个人房源" not in str(detail_mer):return
    Dname = detail_mer.find('span',{'class':'Dname'})
    if Dname:
        self.fd['owner_name'] = Dname.string
    else:
        self.fd['owner_name'] = None
    ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
    if ganji_phone_call_class:
        self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
        # NOTE(review): str.find() returns -1 (truthy) when 'src=' is absent,
        # so this branch is taken almost always -- confirm the intent.
        if str(ganji_phone_call_class).find('src='):
            self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
        else:
            self.fd['owner_phone'] = None
    else:
        self.fd['owner_phone'] = None
    # no contact info -> drop the listing
    if not self.fd['owner_phone']:return
    if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
        cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
        self.fd['cityname'] = cityname
    else:
        return
    if re.search(self.house_floor_regex, response):
        house_floor=re.search(self.house_floor_regex, response).group(1)
        house_topfloor=re.search(self.house_floor_regex, response).group(2)
        self.fd['house_floor'] = house_floor
        self.fd['house_topfloor'] = house_topfloor
    else:
        self.fd['house_floor'] = None
        self.fd['house_topfloor'] = None
    if re.search(self.house_totalarea_regex, response):
        house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
        self.fd['house_totalarea'] = house_totalarea
    else:
        self.fd['house_totalarea'] = None
    # property type (类型)
    if re.search(self.house_type_regex, response):
        house_type=re.search(self.house_type_regex, response).group(1)
        self.fd['house_type'] = housetype(house_type)
    else:
        self.fd['house_type'] = None
    if re.search(self.house_price_regex, response):
        house_price=re.search(self.house_price_regex, response).group(1)
        # "面议" means "negotiable" -> store as 0
        if house_price=="面议":
            house_price="0"
        self.fd['house_price'] = house_price
    else:
        self.fd['house_price'] = None
    posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
    if posttime:
        # publish time has no year; assume the current year, store epoch secs
        Y=int(time.strftime('%Y', time.localtime()))
        M=int(posttime.split(' ')[0].split('-')[0])
        D=int(posttime.split(' ')[0].split('-')[1])
        s = datetime.datetime(Y,M,D,0,0)
        posttime=int(time.mktime(s.timetuple()))
        self.fd['posttime'] =posttime
    else:
        self.fd['posttime'] =None
    if re.search(self.house_room_regex, response):
        house_room=re.search(self.house_room_regex, response).group(1)
        self.fd['house_room'] = house_room
    else:
        self.fd['house_room'] = '0'
    if re.search(self.house_hall_regex, response):
        house_hall=re.search(self.house_hall_regex, response).group(1)
        self.fd['house_hall'] = house_hall
    else:
        self.fd['house_hall'] = '0'
    if re.search(self.house_toilet_regex, response):
        house_toilet=re.search(self.house_toilet_regex, response).group(1)
        self.fd['house_toilet'] = house_toilet
    else:
        self.fd['house_toilet'] = '0'
    house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
    self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
    # free-text description, stripped of tags and the ganji boilerplate
    detail_box = soup.find('div',{'class':'detail_box'})
    if detail_box:
        house_desc = str(detail_box('p')[1])
        self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
    else:
        self.fd['house_desc'] = None
    d_i = soup.find('ul',{'class':'d_i'})
    # neighborhood name: try the JS-embedded fields first
    if re.search(self.xiaoqu_regex, response):
        borough_name=re.search(self.xiaoqu_regex, response).group(1)
        self.fd['borough_name'] = borough_name
        if re.search(self.address_regex, response):
            house_addr=re.search(self.address_regex, response).group(1)
            self.fd['house_addr'] = house_addr
    else:
        if d_i.find(text="小区: "):
            borough_box = d_i.find(text="小区: ").parent
            borough_name = borough_box.find("a")
            if borough_name:
                self.fd['borough_name'] = borough_name.string
            else:
                self.fd['borough_name'] = None
            # street address follows the neighborhood link
            if borough_name and borough_name.nextSibling:
                house_addr = borough_name.nextSibling.string
                self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
            else:
                self.fd['house_addr'] = None
        else:
            if re.search(self.borough_name_regex, response):
                borough_name=re.search(self.borough_name_regex, response).group(1)
                self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
    # district and sub-district (区域)
    area_box = d_i.find(text="区域: ").parent
    area_a = area_box('a')
    if area_a and len(area_a)>1:
        self.fd['cityarea'] = area_a[0].string
        self.fd['section'] = area_a[1].string
    elif area_a and len(area_a)==1:
        self.fd['cityarea'] = area_a[0].string
        self.fd['section'] = None
    else:
        self.fd['cityarea'] = None
        self.fd['section'] = None
    if re.search(self.house_age_regex, response):
        house_age=re.search(self.house_age_regex, response).group(1)
        self.fd['house_age'] = house_age
    else:
        self.fd['house_age'] = None
    # orientation (朝向)
    if re.search(self.house_toward_regex, response):
        house_toward=re.search(self.house_toward_regex, response).group(1)
        self.fd['house_toward'] = toward(house_toward)
    else:
        self.fd['house_toward'] = None
    if re.search(self.house_fitment_regex, response):
        house_fitment=re.search(self.house_fitment_regex, response).group(1)
        self.fd['house_fitment'] = fitment(house_fitment)
    else:
        self.fd['house_fitment'] = 2
    # drop references promptly; the crawler runs for long sessions
    request = None
    response = None
    soup=None
    tree=None
    del tree
    del request
    del response
    del soup
def buy(self,url):
    """Parse a ganji.com wanted-to-buy detail page into self.fd.

    Numeric attributes that do not apply to wanted-ads (floor, type, age,
    orientation, fitment) are zeroed; the expected price/area may be a
    range ("以上" = or-above, "以下" = or-below, "a-b" = between).
    """
    self.fd['city'] = self.citycode
    self.fd['house_flag'] = 3
    request = urllib2.Request(url, None, self.header)
    response = urllib2.urlopen(request).read()
    if self.mayGetIt(response):
        self.fd={}
        return
    tree = etree.HTML(response)
    soup =BeautifulSoup(response)
    detail_mer = soup.find('div',{'class':'detail_mer'})
    # skip ads not marked as private (个人房源)
    if u"个人房源" not in str(detail_mer):return
    Dname = detail_mer.find('span',{'class':'Dname'})
    if Dname:
        self.fd['owner_name'] = Dname.string
    else:
        self.fd['owner_name'] = None
    ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
    if ganji_phone_call_class:
        self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
        # NOTE(review): str.find() returns -1 (truthy) when 'src=' is absent,
        # so this branch is taken almost always -- confirm the intent.
        if str(ganji_phone_call_class).find('src='):
            self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
        else:
            self.fd['owner_phone'] = None
    else:
        self.fd['owner_phone'] = None
    # no contact info -> drop the ad
    if not self.fd['owner_phone']:return
    if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
        cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
        self.fd['cityname'] = cityname
    else:
        return
    self.fd['house_floor'] = 0
    self.fd['house_topfloor'] = 0
    self.fd['house_type'] = 0
    self.fd['house_age'] = 0
    self.fd['house_toward'] = 0
    self.fd['house_fitment'] = 0
    if re.search(self.house_totalarea_regex_qiu, response):
        house_totalarea=re.search(self.house_totalarea_regex_qiu, response).group(1)
        self.fd['house_totalarea'] = house_totalarea
        self.fd['house_totalarea_max'] = house_totalarea
        self.fd['house_totalarea_min'] = house_totalarea
    else:
        self.fd['house_totalarea'] = 0
        self.fd['house_totalarea_max'] = 0
        self.fd['house_totalarea_min'] = 0
    # expected price; "万" = 10k CNY unit, stripped before range parsing
    if re.search(self.house_price_regex_gou, response):
        house_price_zu = re.search(self.house_price_regex_gou, response).group(1)
        house_price_zu = house_price_zu.replace('万','')
        if house_price_zu.find("以上") != -1:
            self.fd['house_price_max'] = 0
            self.fd['house_price_min'] = house_price_zu.replace('以上','')
            self.fd['house_price'] = self.fd['house_price_min']
        elif house_price_zu.find("以下") != -1:
            self.fd['house_price_max'] = house_price_zu.replace('以下','')
            self.fd['house_price_min'] = 0
            self.fd['house_price'] = self.fd['house_price_max']
        elif house_price_zu.find("-") != -1:
            self.fd['house_price_max'] = house_price_zu.split('-')[1]
            self.fd['house_price_min'] = house_price_zu.split('-')[0]
            self.fd['house_price'] = house_price_zu.split('-')[1]
        else:
            self.fd['house_price_max'] = 0
            self.fd['house_price_min'] = 0
            self.fd['house_price'] = 0
    else:
        self.fd['house_price_max'] = 0
        self.fd['house_price_min'] = 0
        self.fd['house_price'] = 0
    posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
    if posttime:
        # publish time has no year; assume the current year, store epoch secs
        Y=int(time.strftime('%Y', time.localtime()))
        M=int(posttime.split(' ')[0].split('-')[0])
        D=int(posttime.split(' ')[0].split('-')[1])
        s = datetime.datetime(Y,M,D,0,0)
        posttime=int(time.mktime(s.timetuple()))
        self.fd['posttime'] =posttime
    else:
        self.fd['posttime'] =None
    if re.search(self.house_room_regex, response):
        house_room=re.search(self.house_room_regex, response).group(1)
        self.fd['house_room'] = house_room
    else:
        self.fd['house_room'] = '0'
    if re.search(self.house_hall_regex, response):
        house_hall=re.search(self.house_hall_regex, response).group(1)
        self.fd['house_hall'] = house_hall
    else:
        self.fd['house_hall'] = '0'
    if re.search(self.house_toilet_regex, response):
        house_toilet=re.search(self.house_toilet_regex, response).group(1)
        self.fd['house_toilet'] = house_toilet
    else:
        self.fd['house_toilet'] = '0'
    house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
    self.fd['house_title'] = house_title
    # free-text description, stripped of tags and the ganji boilerplate
    detail_box = soup.find('div',{'class':'detail_box'})
    if detail_box:
        house_desc = str(detail_box('p')[1])
        self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
    else:
        self.fd['house_desc'] = None
    d_i = soup.find('ul',{'class':'d_i'})
    # neighborhood name: try the JS-embedded fields first
    if re.search(self.xiaoqu_regex, response):
        borough_name=re.search(self.xiaoqu_regex, response).group(1)
        self.fd['borough_name'] = borough_name
        if re.search(self.address_regex, response):
            house_addr=re.search(self.address_regex, response).group(1)
            self.fd['house_addr'] = house_addr
    else:
        if d_i.find(text="小区: "):
            borough_box = d_i.find(text="小区: ").parent
            borough_name = borough_box.find("a")
            if borough_name:
                self.fd['borough_name'] = borough_name.string
            else:
                self.fd['borough_name'] = None
        else:
            # wanted-ads use their own "expected neighborhood" fields
            if re.search(self.borough_name_regex_reg, response):
                borough_name=re.search(self.borough_name_regex_reg, response).group(1)
                self.fd['borough_name'] = borough_name
                if re.search(self.house_addr_regex_reg, response):
                    house_addr=re.search(self.house_addr_regex_reg, response).group(1)
                    self.fd['house_addr'] = house_addr
                else:
                    self.fd['house_addr'] = ''
    # district and sub-district (区域)
    area_box = d_i.find(text="区域: ").parent
    area_a = area_box('a')
    if area_a and len(area_a)>1:
        self.fd['cityarea'] = area_a[0].string
        self.fd['section'] = area_a[1].string
    elif area_a and len(area_a)==1:
        self.fd['cityarea'] = area_a[0].string
        self.fd['section'] = None
    else:
        self.fd['cityarea'] = None
        self.fd['section'] = None
    # drop references promptly; the crawler runs for long sessions
    request = None
    response = None
    soup=None
    tree=None
    del tree
    del request
    del response
    del soup
def rent(self,url):
    """Parse a ganji.com for-rent detail page into self.fd.

    Same early-bailout rules as sell(): agents, non-private listings and
    ads without a phone number are dropped.
    """
    self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
    request = urllib2.Request(url, None, self.header)
    response = urllib2.urlopen(request).read()
    if self.mayGetIt(response):
        self.fd={}
        return
    tree = etree.HTML(response)
    if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
        cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
        self.fd['cityname'] = cityname
    else:
        return
    self.fd['house_flag'] = 2
    self.fd['house_type'] = 0
    self.fd['house_floor'] = ""
    self.fd['house_topfloor'] = ""
    soup =BeautifulSoup(response)
    detail_mer = soup.find('div',{'class':'detail_mer'})
    # skip listings not marked as private (个人房源)
    if u"个人房源" not in str(detail_mer):return
    Dname = detail_mer.find('span',{'class':'Dname'})
    if Dname:
        self.fd['owner_name'] = Dname.string
    else:
        self.fd['owner_name'] = None
    ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
    if ganji_phone_call_class:
        self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
        # NOTE(review): str.find() returns -1 (truthy) when 'src=' is absent,
        # so this branch is taken almost always -- confirm the intent.
        if str(ganji_phone_call_class).find('src='):
            self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
        else:
            self.fd['owner_phone'] = None
    else:
        self.fd['owner_phone'] = None
    # no contact info -> drop the listing
    if not self.fd['owner_phone']:return
    if re.search(self.house_totalarea_regex, response):
        house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
        self.fd['house_totalarea'] = house_totalarea
    else:
        self.fd['house_totalarea'] = None
    if re.search(self.house_price_regex_2, response):
        house_price=re.search(self.house_price_regex_2, response).group(1)
        # "面议" means "negotiable" -> store as 0
        if house_price=="面议":
            house_price="0"
        self.fd['house_price'] = house_price
    else:
        self.fd['house_price'] = None
    posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
    if posttime:
        # publish time has no year; assume the current year, store epoch secs
        Y=int(time.strftime('%Y', time.localtime()))
        M=int(posttime.split(' ')[0].split('-')[0])
        D=int(posttime.split(' ')[0].split('-')[1])
        s = datetime.datetime(Y,M,D,0,0)
        posttime=int(time.mktime(s.timetuple()))
        self.fd['posttime'] =posttime
    else:
        self.fd['posttime'] =None
    house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
    self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
    if re.search(self.house_room_regex, response):
        house_room=re.search(self.house_room_regex, response).group(1)
        self.fd['house_room'] = house_room
    else:
        self.fd['house_room'] = '0'
    if re.search(self.house_hall_regex, response):
        house_hall=re.search(self.house_hall_regex, response).group(1)
        self.fd['house_hall'] = house_hall
    else:
        self.fd['house_hall'] = '0'
    if re.search(self.house_toilet_regex, response):
        house_toilet=re.search(self.house_toilet_regex, response).group(1)
        self.fd['house_toilet'] = house_toilet
    else:
        self.fd['house_toilet'] = '0'
    # NOTE(review): house_title is computed twice in the original; kept as-is.
    house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
    self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
    # free-text description, stripped of tags and the ganji boilerplate
    detail_box = soup.find('div',{'class':'detail_box'})
    if detail_box:
        house_desc = str(detail_box('p')[1])
        self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
    else:
        self.fd['house_desc'] = None
    d_i = soup.find('ul',{'class':'d_i'})
    # neighborhood name: try the JS-embedded fields first
    if re.search(self.xiaoqu_regex, response):
        borough_name=re.search(self.xiaoqu_regex, response).group(1)
        self.fd['borough_name'] = borough_name
        if re.search(self.address_regex, response):
            house_addr=re.search(self.address_regex, response).group(1)
            self.fd['house_addr'] = house_addr
    else:
        if d_i.find(text="小区: "):
            borough_box = d_i.find(text="小区: ").parent
            borough_name = borough_box.find("a")
            if borough_name:
                self.fd['borough_name'] = borough_name.string
            else:
                self.fd['borough_name'] = None
            # street address follows the neighborhood link
            if borough_name and borough_name.nextSibling:
                house_addr = borough_name.nextSibling.string
                self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
            else:
                self.fd['house_addr'] = None
        else:
            if re.search(self.borough_name_regex, response):
                borough_name=re.search(self.borough_name_regex, response).group(1)
                self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
    # district and sub-district (区域)
    area_box = d_i.find(text="区域: ").parent
    area_a = area_box('a')
    if area_a and len(area_a)>1:
        self.fd['cityarea'] = area_a[0].string
        self.fd['section'] = area_a[1].string
    elif area_a and len(area_a)==1:
        self.fd['cityarea'] = area_a[0].string
        self.fd['section'] = None
    else:
        self.fd['cityarea'] = None
        self.fd['section'] = None
    if re.search(self.house_age_regex, response):
        house_age=re.search(self.house_age_regex, response).group(1)
        self.fd['house_age'] = house_age
    else:
        self.fd['house_age'] = None
    # orientation (朝向)
    if re.search(self.house_toward_regex, response):
        house_toward=re.search(self.house_toward_regex, response).group(1)
        self.fd['house_toward'] = toward(house_toward)
    else:
        self.fd['house_toward'] = None
    if re.search(self.house_fitment_regex, response):
        house_fitment=re.search(self.house_fitment_regex, response).group(1)
        self.fd['house_fitment'] = fitment(house_fitment)
    else:
        self.fd['house_fitment'] = 2
    # deposit / payment terms, only present on rental listings
    if re.search(self.house_deposit_regex, response):
        house_deposit=re.search(self.house_deposit_regex, response).group(1)
        self.fd['house_deposit'] = deposit(house_deposit)
    else:
        self.fd['house_deposit'] = None
    # drop references promptly; the crawler runs for long sessions
    request = None
    response = None
    soup=None
    tree=None
    del tree
    del request
    del response
    del soup
def require(self,url):
# Scrape a ganji.com "wanted" (require) listing page and fill self.fd with the
# fields the downstream save API expects. Called by extractDict() for any kind
# other than "1"/"2"/"3"; sets house_flag = 4 for this listing type.
# NOTE: original indentation was lost in this copy; statement order is preserved.
self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
# Fetch the page with Python 2 urllib2, using the spider's shared headers.
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
# mayGetIt() presumably detects pages we must not/cannot parse — TODO confirm;
# in that case the collected fields are discarded and we return early.
if self.mayGetIt(response):
self.fd={}
return
tree = etree.HTML(response)
# City name is taken from the page header; without it the record is useless.
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
# Defaults for fields a "wanted" listing does not carry.
self.fd['house_flag'] = 4
self.fd['house_type'] = 0
self.fd['house_floor'] = ""
self.fd['house_topfloor'] = ""
self.fd['house_totalarea']=0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
self.fd['house_deposit'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
# NOTE(review): 'house_totalarea' is assigned twice (also a few lines above).
self.fd['house_totalarea'] = 0
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
# Not an individual (non-agent) listing: bail out.
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
# Phone number may be plain text or rendered as an image; in the image case
# store the absolute image URL instead of the digits.
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
# NOTE(review): str.find() returns -1 when absent, which is truthy — this test
# is effectively always true unless 'src=' appears at index 0; likely meant
# "...find('src=') != -1". Left as-is pending confirmation against live pages.
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
# No contact info: bail out.
if not self.fd['owner_phone']:return
# Desired rent: the page shows either "X以上" (above X), "X以下" (below X),
# a "min-max" range, or nothing; normalize into price/price_min/price_max.
if re.search(self.house_price_regex_zu, response):
house_price_zu = re.search(self.house_price_regex_zu, response).group(1)
house_price_zu = house_price_zu.replace('元/月','')
if house_price_zu.find("以上") != -1:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = house_price_zu.replace('以上','')
self.fd['house_price'] = house_price_zu.replace('以上','')
elif house_price_zu.find("以下") != -1:
self.fd['house_price_max'] = house_price_zu.replace('以下','')
self.fd['house_price_min'] = 0
self.fd['house_price'] = house_price_zu.replace('以下','')
elif house_price_zu.find("-") != -1:
self.fd['house_price_max'] = house_price_zu.split('-')[1]
self.fd['house_price_min'] = house_price_zu.split('-')[0]
self.fd['house_price'] = house_price_zu.split('-')[1]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
# Posting time: page shows "M-D ..."; the year is assumed to be the current
# year (posts older than a year would be mis-dated). Stored as a Unix epoch.
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
# Title, with the "(wanted to buy/rent/sell)" suffixes stripped.
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
# Rooms / halls / toilets; '0' when the page does not state them.
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
# NOTE(review): title extraction is repeated verbatim here; redundant but harmless.
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
# Description: second <p> of the detail box, tags and boilerplate stripped.
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
# Estate (residential community) name.
# Handle the JS-embedded variant first.
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if re.search(self.borough_name_regex_reg, response):
borough_name=re.search(self.borough_name_regex_reg, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.house_addr_regex_reg, response):
house_addr=re.search(self.house_addr_regex_reg, response).group(1)
self.fd['house_addr'] = house_addr
else:
self.fd['house_addr'] = ''
# District / area: up to two levels (city area, section).
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
# Drop references so large parse trees can be reclaimed promptly.
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def extractDict(self):
# Entry point for one listing URL: dispatch to the parser matching self.kind
# ("1" sell, "2" rent, "3" buy, anything else "wanted"), mark the URL as done,
# stamp the API routing fields, and return the collected dict.
# NOTE: original indentation was lost in this copy; statement order is preserved.
if checkPath(homepath,self.folder,self.urls):
# Already crawled (marker file exists) — fall through and return whatever
# is currently in self.fd without re-fetching.
pass
else:
try:
if self.kind=="1":
self.sell(self.urls)
elif self.kind=="2":
self.rent(self.urls)
elif self.kind=="3":
self.buy(self.urls)
else:
self.require(self.urls)
# Record the URL so the next run skips it.
makePath(homepath,self.folder,self.urls)
# older than seven days (check disabled)
# if (time.time() -self.fd["posttime"]) > 7*24*36000:return
except Exception,e:
# Best-effort: log the failure and still return the partial dict below.
msglogger.info("%s 链接采集异常"%self.urls)
# print "%s||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"%self.urls
# Routing fields consumed by the remote save API (postHost).
self.fd["c"]="houseapi"
self.fd["a"]="savehouse"
self.fd["is_checked"] = 1
self.fd["web_flag"] = "gj"
print "%s %s %s %s %s"%(("%s.soufun.com"% self.citycode),self.citycode, self.kind ,time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())), self.urls)
return self.fd
# NOTE(review): everything below is unreachable — it follows an unconditional
# return. Kept for reference; delete once confirmed unnecessary.
if not self.fd["is_checked"]:
for i in self.fd.items():
print i[0],i[1]
print "*"*80
# if len(self.fd)==7 or len(self.fd)==17:
# print "#####################################"
# continue
# req=urllib2.Request("http://site.jjr360.com/app.php", urllib.urlencode(self.fd))
# p=self.br.open(req).read().strip()
# print p.decode('gbk')
# print "*"*80
class fetchData(threading.Thread):
    """Worker thread: collect listing links for one city/kind, then crawl each page."""

    def __init__(self, d):
        # d carries the crawl parameters: "citycode" and "kind".
        threading.Thread.__init__(self)
        self.d = d

    def run(self):
        citycode = self.d["citycode"]
        kind = self.d["kind"]
        # First pass gathers the listing URLs, second pass extracts their content.
        links = LinkCrawl(citycode, kind).runme()
        ContentCrawl(links, citycode, kind).extractDict()
class getLinksThread(threading.Thread):
    """Worker thread that runs a single link-collection pass for one city/kind."""

    def __init__(self, d):
        # d carries the crawl parameters: "citycode" and "kind".
        threading.Thread.__init__(self)
        self.d = d

    def run(self):
        gc.enable()
        crawler = LinkCrawl(self.d["citycode"], self.d["kind"])
        crawler.runme()
        # Clear objects the collector found uncollectable so memory is released.
        del gc.garbage[:]
def getLinks(d):
    """Run link collection for one city forever, sleeping d["st2"] seconds between passes.

    d must provide "citycode", "kind", "st1" (passed to LinkCrawl) and "st2"
    (inter-pass delay in seconds, converted with int() on every iteration).
    """
    crawler = LinkCrawl(d["citycode"], d["kind"], d["st1"])
    while True:
        crawler.runme()
        # Release uncollectable cycles between passes.
        del gc.garbage[:]
        time.sleep(int(d["st2"]))
def getContent(clinks,citycode,kind,upc):
# Crawl one batch of listing links, POST the extracted dict to the remote save
# API, and log the outcome. Python 2 code (except-comma syntax, print statement).
# NOTE: original indentation was lost in this copy; statement order is preserved.
# return
cc=ContentCrawl(clinks,citycode,kind,upc)
fd=cc.extractDict()
res=""
try:
# postHost ships fd to the save endpoint; on failure the exception object
# itself is kept as the "result" so it appears in the log line below.
res=postHost(fd)
except Exception,e:
res=e
print res
msglogger.info("%s|%s|%s"%(clinks,res,fd))
# Release uncollectable cycles accumulated during parsing.
del gc.garbage[:]
if __name__=="__main__":
# Ad-hoc manual test: crawl a single hard-coded Suzhou listing as kind "4"
# ("wanted"). The commented lines below are alternative one-off experiments.
# lc=LinkCrawl(citycode="su",kind="1")
# lc.runme()#
#url1 = "http://su.ganji.com/fang5/11071015_233901.htm"
#url2 = "http://su.ganji.com/fang1/11071017_418972.htm"
#url3 = "http://su.ganji.com/fang4/11062413_4152.htm"
#url4 = "http://su.ganji.com/fang2/11070900_21214.htm"
cc=ContentCrawl("http://su.ganji.com/fang2/11071417_21820.htm",citycode="su",kind="4")
cc.extractDict()
# while 1:
# for i in range(1,5):
# k = "%s" % str(i)
# try:
# lc=LinkCrawl(citycode="su",kind=k)
# clinks=lc.runme()
# cc=ContentCrawl(clinks,citycode="su",kind=k)
# cc.extractDict()
# except:
# pass
| ptphp/PyLib | src/webpy1/src/jjrspider/ganji.py | Python | apache-2.0 | 40,700 |
/*
* Copyright 2016, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.architecture.blueprints.todoapp.tasks.domain.model;
import android.arch.persistence.room.ColumnInfo;
import android.arch.persistence.room.Entity;
import android.arch.persistence.room.Ignore;
import android.arch.persistence.room.PrimaryKey;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import com.google.common.base.Objects;
import com.google.common.base.Strings;
import java.util.UUID;
/**
 * Immutable model class for a Task.
 */
@Entity(tableName = "tasks")
public final class Task {

    // Room primary key; callers either supply it or a random UUID is generated.
    @PrimaryKey
    @NonNull
    @ColumnInfo(name = "entryid")
    private final String mId;

    // Short user-visible title; may be empty or null.
    @Nullable
    @ColumnInfo(name = "title")
    private final String mTitle;

    // Longer free-form text; may be empty or null.
    @Nullable
    @ColumnInfo(name = "description")
    private final String mDescription;

    // True once the task has been marked done.
    @ColumnInfo(name = "completed")
    private final boolean mCompleted;

    /**
     * Use this constructor to create a new active Task.
     *
     * @param title       title of the task
     * @param description description of the task
     */
    @Ignore
    public Task(@Nullable String title, @Nullable String description) {
        this(title, description, UUID.randomUUID().toString(), false);
    }

    /**
     * Use this constructor to create an active Task if the Task already has an id (copy of another
     * Task).
     *
     * @param title       title of the task
     * @param description description of the task
     * @param id          id of the task
     */
    @Ignore
    public Task(@Nullable String title, @Nullable String description, @NonNull String id) {
        this(title, description, id, false);
    }

    /**
     * Use this constructor to create a new completed Task.
     *
     * @param title       title of the task
     * @param description description of the task
     * @param completed   true if the task is completed, false if it's active
     */
    @Ignore
    public Task(@Nullable String title, @Nullable String description, boolean completed) {
        this(title, description, UUID.randomUUID().toString(), completed);
    }

    /**
     * Use this constructor to specify a completed Task if the Task already has an id (copy of
     * another Task).
     *
     * <p>This is the only constructor Room uses (the others are {@code @Ignore}d).
     *
     * @param title       title of the task
     * @param description description of the task
     * @param id          id of the task
     * @param completed   true if the task is completed, false if it's active
     */
    public Task(@Nullable String title, @Nullable String description,
                @NonNull String id, boolean completed) {
        mId = id;
        mTitle = title;
        mDescription = description;
        mCompleted = completed;
    }

    @NonNull
    public String getId() {
        return mId;
    }

    @Nullable
    public String getTitle() {
        return mTitle;
    }

    // Text to show in list rows: the title, or the description when the title is
    // null/empty.
    @Nullable
    public String getTitleForList() {
        if (!Strings.isNullOrEmpty(mTitle)) {
            return mTitle;
        } else {
            return mDescription;
        }
    }

    @Nullable
    public String getDescription() {
        return mDescription;
    }

    public boolean isCompleted() {
        return mCompleted;
    }

    public boolean isActive() {
        return !mCompleted;
    }

    // A task with neither title nor description carries no user content.
    public boolean isEmpty() {
        return Strings.isNullOrEmpty(mTitle) &&
                Strings.isNullOrEmpty(mDescription);
    }

    // Equality is based on id, title and description only; mCompleted is
    // deliberately excluded, so the same task in active and completed state
    // compares equal. hashCode() below matches this contract.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Task task = (Task) o;
        return Objects.equal(mId, task.mId) &&
                Objects.equal(mTitle, task.mTitle) &&
                Objects.equal(mDescription, task.mDescription);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(mId, mTitle, mDescription);
    }

    @Override
    public String toString() {
        return "Task with title " + mTitle;
    }
}
| sdsxer/mmdiary | client/source/mmdiary/app/src/main/java/com/example/android/architecture/blueprints/todoapp/tasks/domain/model/Task.java | Java | apache-2.0 | 4,578 |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.core.artifact;
import com.facebook.buck.core.rules.analysis.action.ActionAnalysisDataKey;
/**
 * Represents an {@link Artifact} that is just declared by a rule implemention, with no {@link
 * com.facebook.buck.core.rules.actions.Action}s attached to build it.
 *
 * <p>This is not intended to be used by users, but only by the build engine.
 */
public interface DeclaredArtifact extends Artifact {

  /**
   * Binds an corresponding {@link com.facebook.buck.core.rules.actions.Action} as represented by
   * the {@link ActionAnalysisDataKey} to this {@link Artifact}.
   *
   * @param key the {@link ActionAnalysisDataKey} to attach to this {@link Artifact}
   * @return the {@link BuildArtifact} of this instance after attaching the {@link
   *     ActionAnalysisDataKey}.
   */
  BuildArtifact materialize(ActionAnalysisDataKey key);

  /**
   * Intended for framework use only, comparing order for {@link DeclaredArtifact}.
   *
   * @param artifact the other declared artifact to order against
   * @return a negative integer, zero, or a positive integer following the usual
   *     comparator convention
   */
  int compareDeclared(DeclaredArtifact artifact);
}
| JoelMarcey/buck | src/com/facebook/buck/core/artifact/DeclaredArtifact.java | Java | apache-2.0 | 1,622 |
package org.spanna.material;
import org.spanna.block.BlockFace;
import org.spanna.Material;
/**
 * MaterialData for torches.
 *
 * <p>The low data bits encode which way the torch leans, which in turn implies
 * the face of the neighbouring block it is attached to.
 */
public class Torch extends SimpleAttachableMaterialData {
    public Torch() {
        super(Material.TORCH);
    }

    /**
     *
     * @deprecated Magic value
     */
    @Deprecated
    public Torch(final int type) {
        super(type);
    }

    public Torch(final Material type) {
        super(type);
    }

    /**
     *
     * @deprecated Magic value
     */
    @Deprecated
    public Torch(final int type, final byte data) {
        super(type, data);
    }

    /**
     *
     * @deprecated Magic value
     */
    @Deprecated
    public Torch(final Material type, final byte data) {
        super(type, data);
    }

    /**
     * Gets the face that this block is attached on
     *
     * @return BlockFace attached to
     */
    public BlockFace getAttachedFace() {
        // Data values 0x1..0x4 are the four walls; anything else (including 0x5)
        // is treated as standing on the block below.
        switch (getData()) {
            case 0x1:
                return BlockFace.WEST;
            case 0x2:
                return BlockFace.EAST;
            case 0x3:
                return BlockFace.NORTH;
            case 0x4:
                return BlockFace.SOUTH;
            case 0x5:
            default:
                return BlockFace.DOWN;
        }
    }

    /**
     * Sets the direction this torch points toward (the opposite of the face it
     * is attached to). Unknown faces default to pointing up.
     *
     * @param face the facing direction
     */
    public void setFacingDirection(BlockFace face) {
        final byte raw;
        switch (face) {
            case EAST:
                raw = 0x1;
                break;
            case WEST:
                raw = 0x2;
                break;
            case SOUTH:
                raw = 0x3;
                break;
            case NORTH:
                raw = 0x4;
                break;
            case UP:
            default:
                raw = 0x5;
                break;
        }
        setData(raw);
    }

    @Override
    public Torch clone() {
        return (Torch) super.clone();
    }
}
| SpannaProject/SpannaAPI | src/main/java/org/spanna/material/Torch.java | Java | apache-2.0 | 1,816 |
/*
* Copyright 2013 Philip Schiffer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.psdev.slf4j.android.logger;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import org.hamcrest.CoreMatchers;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.RobolectricTestRunner;
import org.robolectric.annotation.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import android.util.Log;
// Unit tests for AndroidLoggerAdapter: verifies that every SLF4J level
// (trace/debug/info/warn/error) maps to the matching android.util.Log priority,
// that {} placeholders and trailing throwables are handled, and that the log
// tag and caller class name are injected into each message. Runs under
// Robolectric with EnhancedShadowLog capturing Log output.
@RunWith(RobolectricTestRunner.class)
@Config(manifest = Config.NONE, shadows = { EnhancedShadowLog.class })
public class AndroidLoggerAdapterTest {

    private Logger mLogger;

    @Before
    public void setUp() throws Exception {
        mLogger = LoggerFactory.getLogger(AndroidLoggerAdapterTest.class);
        // Echo captured log lines to stdout for easier debugging of failures.
        EnhancedShadowLog.stream = System.out;
    }

    @Test
    public void testInitialization() throws Exception {
        // "TestLogTag" and TRACE come from the logger.properties used by the tests.
        assertEquals("should have read correct log tag from properties", "TestLogTag",
                AndroidLoggerAdapter.getLogTag());
        assertEquals("should have correct name", AndroidLoggerAdapterTest.class.getName(), mLogger.getName());
        assertEquals("should have correct log level", LogLevel.TRACE, AndroidLoggerAdapter.getLogLevel());
    }

    // ---- trace: mapped to Log.VERBOSE ----

    @Test
    public void testIsTraceEnabled() throws Exception {
        assertTrue("trace should be enabled", mLogger.isTraceEnabled());
    }

    @Test
    public void testTrace() throws Exception {
        mLogger.trace("test trace");
        assertLog(Log.VERBOSE, "test trace");
    }

    @Test
    public void testTraceWithArg() throws Exception {
        mLogger.trace("test trace {}", "argument");
        assertLog(Log.VERBOSE, "test trace argument");
    }

    @Test
    public void testTraceWithTwoArgs() throws Exception {
        mLogger.trace("test trace {} {}", "argument", "argument2");
        assertLog(Log.VERBOSE, "test trace argument argument2");
    }

    @Test
    public void testTraceWithVarArgs() throws Exception {
        mLogger.trace("test trace {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.VERBOSE, "test trace argument argument2 argument3");
    }

    @Test
    public void testTraceWithArgAndThrowable() throws Exception {
        // A trailing throwable after the varargs must be logged, not formatted.
        final Exception exception = new Exception("test trace exception");
        mLogger.trace("test trace {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.VERBOSE, "test trace argument argument2 argument3", exception);
    }

    @Test
    public void testTraceWithThrowable() throws Exception {
        final Exception exception = new Exception("test trace exception");
        mLogger.trace("test trace", exception);
        assertLog(Log.VERBOSE, "test trace", exception);
    }

    // ---- debug: mapped to Log.DEBUG ----

    @Test
    public void testIsDebugEnabled() throws Exception {
        assertTrue("debug should be enabled", mLogger.isDebugEnabled());
    }

    @Test
    public void testDebug() throws Exception {
        mLogger.debug("test debug");
        assertLog(Log.DEBUG, "test debug");
    }

    @Test
    public void testDebugWithArg() throws Exception {
        mLogger.debug("test debug {}", "argument");
        assertLog(Log.DEBUG, "test debug argument");
    }

    @Test
    public void testDebugWithTwoArgs() throws Exception {
        mLogger.debug("test debug {} {}", "argument", "argument2");
        assertLog(Log.DEBUG, "test debug argument argument2");
    }

    @Test
    public void testDebugWithVarArgs() throws Exception {
        mLogger.debug("test debug {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.DEBUG, "test debug argument argument2 argument3");
    }

    @Test
    public void testDebugWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test debug exception");
        mLogger.debug("test debug {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.DEBUG, "test debug argument argument2 argument3", exception);
    }

    @Test
    public void testDebugWithThrowable() throws Exception {
        final Exception exception = new Exception("test debug exception");
        mLogger.debug("test debug", exception);
        assertLog(Log.DEBUG, "test debug", exception);
    }

    // ---- info: mapped to Log.INFO ----

    @Test
    public void testIsInfoEnabled() throws Exception {
        assertTrue("info should be enabled", mLogger.isInfoEnabled());
    }

    @Test
    public void testInfo() throws Exception {
        mLogger.info("test info");
        assertLog(Log.INFO, "test info");
    }

    @Test
    public void testInfoWithArg() throws Exception {
        mLogger.info("test info {}", "argument");
        assertLog(Log.INFO, "test info argument");
    }

    @Test
    public void testInfoWithTwoArgs() throws Exception {
        mLogger.info("test info {} {}", "argument", "argument2");
        assertLog(Log.INFO, "test info argument argument2");
    }

    @Test
    public void testInfoWithVarArgs() throws Exception {
        mLogger.info("test info {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.INFO, "test info argument argument2 argument3");
    }

    @Test
    public void testInfoWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test info exception");
        mLogger.info("test info {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.INFO, "test info argument argument2 argument3", exception);
    }

    @Test
    public void testInfoWithThrowable() throws Exception {
        final Exception exception = new Exception("test info exception");
        mLogger.info("test info", exception);
        assertLog(Log.INFO, "test info", exception);
    }

    // ---- warn: mapped to Log.WARN ----

    @Test
    public void testIsWarnEnabled() throws Exception {
        assertTrue("warn should be enabled", mLogger.isWarnEnabled());
    }

    @Test
    public void testWarn() throws Exception {
        mLogger.warn("test info");
        assertLog(Log.WARN, "test info");
    }

    @Test
    public void testWarnWithArg() throws Exception {
        mLogger.warn("test warn {}", "argument");
        assertLog(Log.WARN, "test warn argument");
    }

    @Test
    public void testWarnWithTwoArgs() throws Exception {
        mLogger.warn("test warn {} {}", "argument", "argument2");
        assertLog(Log.WARN, "test warn argument argument2");
    }

    @Test
    public void testWarnWithVarArgs() throws Exception {
        mLogger.warn("test warn {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.WARN, "test warn argument argument2 argument3");
    }

    @Test
    public void testWarnWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test warn exception");
        mLogger.warn("test warn {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.WARN, "test warn argument argument2 argument3", exception);
    }

    @Test
    public void testWarnWithThrowable() throws Exception {
        final Exception exception = new Exception("test warn exception");
        mLogger.warn("test warn", exception);
        assertLog(Log.WARN, "test warn", exception);
    }

    // ---- error: mapped to Log.ERROR ----

    @Test
    public void testIsErrorEnabled() throws Exception {
        assertTrue("error should be enabled", mLogger.isErrorEnabled());
    }

    @Test
    public void testError() throws Exception {
        mLogger.error("test error");
        assertLog(Log.ERROR, "test error");
    }

    @Test
    public void testErrorWithArg() throws Exception {
        mLogger.error("test error {}", "argument");
        assertLog(Log.ERROR, "test error argument");
    }

    @Test
    public void testErrorWithTwoArgs() throws Exception {
        mLogger.error("test error {} {}", "argument", "argument2");
        assertLog(Log.ERROR, "test error argument argument2");
    }

    @Test
    public void testErrorWithVarArgs() throws Exception {
        mLogger.error("test error {} {} {}", "argument", "argument2", "argument3");
        assertLog(Log.ERROR, "test error argument argument2 argument3");
    }

    @Test
    public void testErrorWithArgAndThrowable() throws Exception {
        final Exception exception = new Exception("test error exception");
        mLogger.error("test error {} {} {}", "argument", "argument2", "argument3", exception);
        assertLog(Log.ERROR, "test error argument argument2 argument3", exception);
    }

    @Test
    public void testErrorWithThrowable() throws Exception {
        final Exception exception = new Exception("test error exception");
        mLogger.error("test error", exception);
        assertLog(Log.ERROR, "test error", exception);
    }

    @Test
    public void testInnerclassMatching() throws Exception {
        // Logging from an inner class must report the inner class's name, not
        // the enclosing test class's.
        final InnerClassTest innerClassTest = new InnerClassTest();
        innerClassTest.doSomething();
        assertLog(Log.INFO, "inner class match");
        assertThat("should contain correct class name", EnhancedShadowLog.getLogs().get(0).msg,
                CoreMatchers.containsString("InnerClassTest"));
    }

    @After
    public void tearDown() throws Exception {
        // Clear captured log entries so tests stay independent.
        EnhancedShadowLog.reset();
    }

    // Helper

    // Convenience overload: no throwable expected.
    private static void assertLog(final int expectedLogLevel, final String expectedContainedText) {
        assertLog(expectedLogLevel, expectedContainedText, null);
    }

    // Asserts exactly one entry was captured, with the given priority, the
    // message text, the calling class's simple name, the configured tag, and
    // (when non-null) the given throwable.
    private static void assertLog(final int expectedLogLevel, final String expectedContainedText,
            final Throwable expectedThrowable) {
        assertEquals("should have logged 1 message", 1L, EnhancedShadowLog.getLogs().size());
        final EnhancedShadowLog.LogItem logItem = EnhancedShadowLog.getLogs().get(0);
        assertEquals("should have correct type", expectedLogLevel, logItem.type);
        assertThat("should contain message", logItem.msg, CoreMatchers.containsString(expectedContainedText));
        assertThat("should contain class", logItem.msg, CoreMatchers.containsString(
                AndroidLoggerAdapterTest.class.getSimpleName()));
        assertEquals("should have correct log tag", "TestLogTag", logItem.tag);
        if (expectedThrowable != null) {
            assertEquals("should have logged the correct throwable", expectedThrowable, logItem.throwable);
        }
    }

    // Non-static inner class used by testInnerclassMatching to exercise caller
    // class-name detection.
    class InnerClassTest {
        public void doSomething() {
            mLogger.info("inner class match");
        }
    }
}
| PSDev/slf4j-android-logger | src/test/java/de/psdev/slf4j/android/logger/AndroidLoggerAdapterTest.java | Java | apache-2.0 | 11,065 |
package com.yan.leetcode;
import org.junit.Test;
public class RemoveDuplicatesFromSortedList {

    @Test
    public void test() {
        ListNode head = new ListNode(1);
        head.next = new ListNode(1);
        // head.next.next = new ListNode(2);
        System.out.println(deleteDuplicates2(head));
    }

    /**
     * Removes consecutive duplicates from a sorted list in place.
     *
     * @param head head of a list sorted in non-decreasing order; may be null
     * @return the same head, with repeated successors unlinked
     */
    public ListNode deleteDuplicates(ListNode head) {
        if (head == null || head.next == null) {
            return head;
        }
        ListNode index = head;
        while (index.next != null) {
            if (index.next.val == index.val) {
                // Next node repeats the current value: unlink it.
                index.next = index.next.next;
            } else {
                index = index.next;
            }
        }
        return head;
    }

    /**
     * Dummy-head variant of {@link #deleteDuplicates(ListNode)}.
     *
     * <p>Bug fix: the previous version seeded the dummy node with
     * {@code Integer.MAX_VALUE} and compared by value only, so any leading nodes
     * actually holding {@code Integer.MAX_VALUE} were silently dropped. The
     * dummy is now recognized by identity, making the rewrite correct for every
     * input value.
     *
     * @param head head of a list sorted in non-decreasing order; may be null
     * @return head of the list with consecutive duplicates removed
     */
    public ListNode deleteDuplicates2(ListNode head) {
        if (head == null) {
            return head;
        }
        ListNode dummy = new ListNode(0);
        ListNode tail = dummy;
        while (head != null) {
            // Keep the node when it is the first real node, or when its value
            // differs from the last value we kept.
            if (tail == dummy || head.val != tail.val) {
                tail.next = head;
                tail = tail.next;
            }
            head = head.next;
        }
        // Terminate the list in case trailing duplicates were skipped.
        tail.next = null;
        return dummy.next;
    }
}
| ustbyjy/leetcode | src/main/java/com/yan/leetcode/RemoveDuplicatesFromSortedList.java | Java | apache-2.0 | 949 |
/*
* Copyright 2017 Exorath
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.exorath.exoHUD;
import java.util.List;
/**
 * Straightforward {@link HUDPackage} backed by a caller-supplied list.
 *
 * <p>Created by toonsev on 9/21/2016.
 */
public class SimpleHUDPackage implements HUDPackage {

    // Stored as given by the caller; no defensive copy is made.
    private final List<HUDText> hudTexts;

    public SimpleHUDPackage(List<HUDText> texts) {
        this.hudTexts = texts;
    }

    @Override
    public List<HUDText> getTexts() {
        return hudTexts;
    }
}
| Exorath/ExoHUD | src/main/java/com/exorath/exoHUD/SimpleHUDPackage.java | Java | apache-2.0 | 973 |
/*
* Copyright 2015 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc;
import com.google.common.base.Preconditions;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.net.SocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * A group of {@link SocketAddress}es that are considered equivalent when channel makes connections.
 *
 * <p>Usually the addresses are addresses resolved from the same host name, and connecting to any of
 * them is equally sufficient. They do have order. An address appears earlier on the list is likely
 * to be tried earlier.
 */
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1770")
public final class EquivalentAddressGroup {

  /** Immutable, ordered list of equivalent addresses. */
  private final List<SocketAddress> addresses;

  /** Metadata attached to this group; its values may be mutable. */
  private final Attributes attributes;

  /**
   * {@link SocketAddress} docs say that the addresses are immutable, so the list's hash is
   * computed once at construction and reused.
   */
  private final int cachedHashCode;

  /**
   * List constructor without {@link Attributes}.
   */
  public EquivalentAddressGroup(List<SocketAddress> addrs) {
    this(addrs, Attributes.EMPTY);
  }

  /**
   * List constructor with {@link Attributes}.
   */
  public EquivalentAddressGroup(List<SocketAddress> addrs, @Attr Attributes attrs) {
    Preconditions.checkArgument(!addrs.isEmpty(), "addrs is empty");
    this.addresses = Collections.unmodifiableList(new ArrayList<>(addrs));
    this.attributes = Preconditions.checkNotNull(attrs, "attrs");
    // Attributes may contain mutable objects, which means Attributes' hashCode may change over
    // time, so only the address list's hash is cached here.
    cachedHashCode = this.addresses.hashCode();
  }

  /**
   * Singleton constructor without Attributes.
   */
  public EquivalentAddressGroup(SocketAddress addr) {
    this(addr, Attributes.EMPTY);
  }

  /**
   * Singleton constructor with Attributes.
   */
  public EquivalentAddressGroup(SocketAddress addr, @Attr Attributes attrs) {
    this(Collections.singletonList(addr), attrs);
  }

  /**
   * Returns an immutable list of the addresses.
   */
  public List<SocketAddress> getAddresses() {
    return addresses;
  }

  /**
   * Returns the attributes.
   */
  @Attr
  public Attributes getAttributes() {
    return attributes;
  }

  @Override
  public String toString() {
    // TODO(zpencer): Summarize return value if addr is very large
    return "[addrs=" + addresses + ", attrs=" + attributes + "]";
  }

  @Override
  public int hashCode() {
    // Precomputed at construction; avoids walking the underlying list.
    return cachedHashCode;
  }

  /**
   * Returns true if the given object is also an {@link EquivalentAddressGroup} with an equal
   * address list and equal attribute values.
   *
   * <p>Note that if the attributes include mutable values, it is possible for two objects to be
   * considered equal at one point in time and not equal at another (due to concurrent mutation of
   * attribute values).
   */
  @Override
  public boolean equals(Object other) {
    if (!(other instanceof EquivalentAddressGroup)) {
      return false;
    }
    EquivalentAddressGroup that = (EquivalentAddressGroup) other;
    int size = addresses.size();
    if (size != that.addresses.size()) {
      return false;
    }
    // Index-based walk keeps the original behavior of not allocating an iterator.
    for (int i = 0; i < size; i++) {
      if (!addresses.get(i).equals(that.addresses.get(i))) {
        return false;
      }
    }
    return attributes.equals(that.attributes);
  }

  /**
   * Annotation for {@link EquivalentAddressGroup}'s attributes. It follows the annotation semantics
   * defined by {@link Attributes}.
   */
  @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4972")
  @Retention(RetentionPolicy.SOURCE)
  @Documented
  public @interface Attr {}
}
| zhangkun83/grpc-java | core/src/main/java/io/grpc/EquivalentAddressGroup.java | Java | apache-2.0 | 4,376 |
/**
 * Tests for the task-tracker package ({@code ru.job4j.tracker}).
 *
 * @author Alexander Golovatyuk
 * @version $Id$
 * @since 0.1
 */
package ru.job4j.tracker;
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del user command."""
import pwd
import os
import unittest
# When executed directly (rather than through the full test suite), make the
# shared test dependencies importable before brokertest is pulled in.
if __name__ == "__main__":
    import utils
    utils.import_depends()
class TestDelUser(TestBrokerCommand):
    """Exercise del_user against the account of the user running the tests."""

    def test_100_del_current_user(self):
        # Deleting an existing user produces no output.
        username = pwd.getpwuid(os.getuid()).pw_name
        self.noouttest(["del_user", "--username", username])

    def test_105_verify_gone(self):
        # After deletion, show_user must report the user as missing.
        username = pwd.getpwuid(os.getuid()).pw_name
        command = ["show_user", "--username", username]
        out = self.notfoundtest(command)
        self.matchoutput(out, "User %s not found." % username, command)

    def test_110_del_current_user_again(self):
        # A second deletion attempt must fail with "not found".
        username = pwd.getpwuid(os.getuid()).pw_name
        command = ["del_user", "--username", username]
        out = self.notfoundtest(command)
        self.matchoutput(out, "User %s not found." % username, command)
if __name__ == '__main__':
    # Run just this test case, with verbose per-test output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestDelUser)
    unittest.TextTestRunner(verbosity=2).run(suite)
| guillaume-philippon/aquilon | tests/broker/test_del_user.py | Python | apache-2.0 | 1,774 |
package com.ganxin.codebase.widgets.layout;
import android.annotation.SuppressLint;
import android.content.Context;
import android.util.AttributeSet;
import android.widget.RelativeLayout;
/**
 *
 * Description : A RelativeLayout that is always square -- its height is forced
 * to equal its measured width. (The class name "SquareLaylout" is misspelled,
 * but renaming it would break existing layout XML references.) <br/>
 * author : WangGanxin <br/>
 * date : 2016/9/4 <br/>
 * email : ganxinvip@163.com <br/>
 */
public class SquareLaylout extends RelativeLayout {

    @SuppressLint("NewApi")
    public SquareLaylout(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
    }

    public SquareLaylout(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    public SquareLaylout(Context context) {
        super(context);
    }

    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        // For simple implementation, our internal size is always 0.
        // We depend on the container to specify the layout size of
        // our view. We can't really know what it is since we will be
        // adding and removing different arbitrary views and do not
        // want the layout to change as this happens.
        setMeasuredDimension(getDefaultSize(0, widthMeasureSpec), getDefaultSize(0, heightMeasureSpec));

        // Children are just made to fill our space.
        int childWidthSize = getMeasuredWidth();

        // Force the height spec to equal the width spec so the layout (and its
        // children) always measure out as a square.
        heightMeasureSpec = widthMeasureSpec = MeasureSpec.makeMeasureSpec(childWidthSize, MeasureSpec.EXACTLY);
        super.onMeasure(widthMeasureSpec, heightMeasureSpec);
    }
}
| WangGanxin/Codebase | app/src/main/java/com/ganxin/codebase/widgets/layout/SquareLaylout.java | Java | apache-2.0 | 1,695 |
package com.hubspot.singularity.scheduler;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.hubspot.singularity.MachineState;
import com.hubspot.singularity.SingularitySlave;
import com.hubspot.singularity.data.SlaveManager;
@Singleton
public class SingularityUsageHelper {

  private final SlaveManager slaveManager;

  @Inject
  public SingularityUsageHelper(SlaveManager slaveManager) {
    this.slaveManager = slaveManager;
  }

  /**
   * Returns the ids of every slave returned by {@link #getSlavesToTrackUsageFor()}.
   */
  public Set<String> getSlaveIdsToTrackUsageFor() {
    List<SingularitySlave> tracked = getSlavesToTrackUsageFor();
    Set<String> slaveIds = new HashSet<>(tracked.size());
    for (SingularitySlave slave : tracked) {
      slaveIds.add(slave.getId());
    }
    return slaveIds;
  }

  /**
   * Returns all known slaves except those whose state is inactive or DECOMMISSIONED.
   */
  public List<SingularitySlave> getSlavesToTrackUsageFor() {
    List<SingularitySlave> allSlaves = slaveManager.getObjects();
    List<SingularitySlave> tracked = new ArrayList<>(allSlaves.size());
    for (SingularitySlave slave : allSlaves) {
      MachineState state = slave.getCurrentState().getState();
      boolean retired = state.isInactive() || state == MachineState.DECOMMISSIONED;
      if (!retired) {
        tracked.add(slave);
      }
    }
    return tracked;
  }
}
| andrhamm/Singularity | SingularityService/src/main/java/com/hubspot/singularity/scheduler/SingularityUsageHelper.java | Java | apache-2.0 | 1,323 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.commands.catalog;
import java.util.List;
import java.util.Set;
import org.apache.camel.catalog.CamelComponentCatalog;
import org.apache.camel.catalog.DefaultCamelComponentCatalog;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
 * Tests for {@link DefaultCamelComponentCatalog}: name lookup (with and
 * without filters), per-component JSON schemas, and label discovery.
 */
public class CamelComponentCatalogTest {

    private static final Logger LOG = LoggerFactory.getLogger(CamelComponentCatalogTest.class);

    /**
     * Looks up component names matching {@code filter} and asserts that at
     * least one component was found. Factors out the pattern shared by the
     * positive filter tests below.
     */
    private void assertFilterFindsComponents(String filter) {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        List<String> names = catalog.findComponentNames(filter);
        assertNotNull(names);
        LOG.info("Found {} names", names.size());
        assertTrue("Should find some " + filter + " components", names.size() > 0);
    }

    @Test
    public void testFindComponentNames() {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        List<String> names = catalog.findComponentNames();
        assertNotNull(names);
        LOG.info("Found {} names", names.size());
        assertTrue("Should find some components", names.size() > 0);
    }

    @Test
    public void testFindComponentNamesFilter() {
        assertFilterFindsComponents("testing");
    }

    @Test
    public void testFindComponentNamesFilterWildcard() {
        assertFilterFindsComponents("t*");
    }

    @Test
    public void testFindComponentNamesFilterTwo() {
        assertFilterFindsComponents("transformation");
    }

    @Test
    public void testFindComponentNamesFilterNoMatch() {
        // Negative case: a nonsense filter must yield an empty (not null) list.
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        List<String> names = catalog.findComponentNames("cannotmatchme");
        assertNotNull(names);
        assertTrue("Should not match any components", names.size() == 0);
    }

    @Test
    public void testCoreComponentJson() {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        String json = catalog.componentJSonSchema("bean");
        assertNotNull(json);
        LOG.info(json);
        assertTrue("Should find bean component", json.contains("bean"));
    }

    @Test
    public void testFtpComponentJson() {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        String json = catalog.componentJSonSchema("ftp");
        assertNotNull(json);
        LOG.info(json);
        assertTrue("Should find ftp component", json.contains("ftp"));
    }

    @Test
    public void testLabels() {
        CamelComponentCatalog catalog = new DefaultCamelComponentCatalog();
        Set<String> labels = catalog.findComponentLabels();
        assertNotNull(labels);
        assertTrue("Should find labels", labels.size() > 0);
        assertTrue("Should find core label", labels.contains("core"));
        assertTrue("Should find testing label", labels.contains("testing"));
        assertTrue("Should find rest label", labels.contains("rest"));
    }
}
| logzio/camel | platforms/commands/commands-core/src/test/java/org/apache/camel/commands/catalog/CamelComponentCatalogTest.java | Java | apache-2.0 | 4,257 |
using System.Web.Mvc;
using EPiServer.Reference.Commerce.Site.Features.Global.ProductRegistration.Pages;
using EPiServer.Reference.Commerce.Site.Features.Global.ProductRegistration.ViewModels;
using EPiServer.Reference.Commerce.Site.Features.Global.Profile.Pages;
using EPiServer.Web.Mvc;
using EPiServer.Reference.Commerce.Site.Features.Global.Profile.ViewModels;
namespace EPiServer.Reference.Commerce.Site.Features.Global.ProductRegistration.Controllers
{
//[Authorize]
public class ProductRegistration : PageController<ProductRegistrationPage>
{
public ActionResult Index(ProductRegistrationPage currentPage)
{
var viewModel = new ProductRegistrationViewModel { CurrentPage = currentPage };
return View(viewModel);
}
}
} | simerc/QuicksilverPlus | Sources/EPiServer.Reference.Commerce.Site/Features/Global/ProductRegistration/Controllers/ProductRegistrationController.cs | C# | apache-2.0 | 792 |
package bookshop2.supplier.incoming.queryBooks;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import org.aries.runtime.BeanContext;
import org.aries.runtime.RequestContext;
import org.aries.tx.AbstractHandlerUnitTest;
import org.aries.tx.Transactional;
import org.aries.util.FieldUtil;
import org.aries.validate.util.CheckpointManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.runners.MockitoJUnitRunner;
import bookshop2.QueryRequestMessage;
import bookshop2.supplier.SupplierProcess;
import bookshop2.util.Bookshop2Fixture;
/**
 * Unit tests for {@code QueryBooksHandlerImpl}.
 *
 * <p>Each test builds a {@code QueryRequestMessage}, configures the expected
 * correlation/transaction context on the inherited test harness, invokes the
 * handler, and then verifies how the mocked {@link SupplierProcess} was (or
 * was not) called. Failure scenarios register an expected
 * ServiceAbortedException message before running.
 */
@RunWith(MockitoJUnitRunner.class)
public class QueryBooksHandlerUnitTest extends AbstractHandlerUnitTest {

    private QueryBooksHandlerImpl fixture;
    private RequestContext mockRequestContext;
    private SupplierProcess mockSupplierProcess;

    // Handler name -- presumably consumed by AbstractHandlerUnitTest to locate
    // configuration; confirm against the base class.
    public String getName() {
        return "QueryBooks";
    }

    // Service domain used by the base harness.
    public String getDomain() {
        return "bookshop2.supplier";
    }

    public Transactional getFixture() {
        return fixture;
    }

    public SupplierProcess getMockServiceProcess() {
        return mockSupplierProcess;
    }

    @Before
    public void setUp() throws Exception {
        // Mocks must exist before super.setUp() runs the base initialization.
        mockRequestContext = mock(RequestContext.class);
        mockSupplierProcess = mock(SupplierProcess.class);
        CheckpointManager.setJAXBSessionCache(getJAXBSessionCache());
        CheckpointManager.addConfiguration("bookshop2-supplier-service-checks.xml");
        super.setUp();
    }

    @After
    public void tearDown() throws Exception {
        // Clear shared state so tests cannot leak into each other.
        BeanContext.clear();
        mockRequestContext = null;
        mockSupplierProcess = null;
        fixture = null;
        super.tearDown();
    }

    // Builds the handler under test, injecting the mocks by reflection since
    // the real container is not running here.
    protected QueryBooksHandlerImpl createFixture() throws Exception {
        fixture = new QueryBooksHandlerImpl();
        FieldUtil.setFieldValue(fixture, "requestContext", mockRequestContext);
        FieldUtil.setFieldValue(fixture, "supplierProcess", mockSupplierProcess);
        initialize(fixture);
        return fixture;
    }

    // Happy path: a well-formed request with valid context succeeds.
    @Test
    public void testExecute_queryBooks_Success() throws Exception {
        QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
        setupContext(expectedCorrelationId, expectedTransactionId);
        setupMessage(queryRequestMessage);
        runTestExecute_queryBooks(queryRequestMessage);
    }

    @Test
    public void testExecute_queryBooks_NullRequest() throws Exception {
        addExpectedServiceAbortedException("Incoming message is null");
        expectedCorrelationId = null;
        isRequestExpected = true;
        runTestExecute_queryBooks(null);
    }

    @Test
    public void testExecute_queryBooks_EmptyRequest() throws Exception {
        addExpectedServiceAbortedException("QueryRequestMessage must include one or more Book(s)");
        QueryRequestMessage queryRequestMessage = Bookshop2Fixture.createEmpty_QueryRequestMessage();
        setupContext(expectedCorrelationId, expectedTransactionId);
        setupMessage(queryRequestMessage);
        isAbortExpected = true;
        runTestExecute_queryBooks(queryRequestMessage);
    }

    @Test
    public void testExecute_queryBooks_NullCorrelationId() throws Exception {
        addExpectedServiceAbortedException("CorrelationId null");
        QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
        expectedCorrelationId = null;
        setupContext(expectedCorrelationId, expectedTransactionId);
        setupMessage(queryRequestMessage);
        isAbortExpected = true;
        runTestExecute_queryBooks(queryRequestMessage);
    }

    @Test
    public void testExecute_queryBooks_EmptyCorrelationId() throws Exception {
        addExpectedServiceAbortedException("CorrelationId empty");
        QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
        expectedCorrelationId = "";
        setupContext(expectedCorrelationId, expectedTransactionId);
        setupMessage(queryRequestMessage);
        isAbortExpected = true;
        runTestExecute_queryBooks(queryRequestMessage);
    }

    // Unlike a missing correlation id, a missing transaction id is tolerated.
    @Test
    public void testExecute_queryBooks_NullTransactionId() throws Exception {
        QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
        expectedTransactionId = null;
        setupContext(expectedCorrelationId, expectedTransactionId);
        //setGlobalTransactionActive(true);
        setupMessage(queryRequestMessage);
        runTestExecute_queryBooks(queryRequestMessage);
    }

    @Test
    public void testExecute_queryBooks_EmptyTransactionId() throws Exception {
        QueryRequestMessage queryRequestMessage = Bookshop2Fixture.create_QueryRequestMessage();
        expectedTransactionId = "";
        setupContext(expectedCorrelationId, expectedTransactionId);
        //setGlobalTransactionActive(true);
        setupMessage(queryRequestMessage);
        runTestExecute_queryBooks(queryRequestMessage);
    }

    // Shared driver: invokes the handler and funnels success/failure through
    // the validation hooks below.
    public void runTestExecute_queryBooks(QueryRequestMessage queryRequestMessage) throws Exception {
        try {
            fixture = createFixture();
            fixture.queryBooks(queryRequestMessage);
            if (isGlobalTransactionActive())
                validateEnrollTransaction(queryRequestMessage);
            validateProcessInvocation(queryRequestMessage);
        } catch (Throwable e) {
            validateAfterException(e);
        } finally {
            validateProcessNotification();
            validateAfterExecution();
        }
    }

    // On success the request must have been forwarded to the process exactly once.
    protected void validateProcessInvocation(QueryRequestMessage queryRequestMessage) throws Exception {
        if (!isAbortExpected)
            verify(mockSupplierProcess).handle_QueryBooks_request(queryRequestMessage);
    }

    protected void validateProcessNotification() throws Exception {
        //verify(mockSupplierProcess).fireQueryBooksDone();
    }

    // On an expected abort the process must have been told about the failure.
    protected void validateAfterExecution() throws Exception {
        if (isAbortExpected)
            verify(mockSupplierProcess).handle_QueryBooks_request_exception(expectedCorrelationId, expectedException);
        super.validateAfterExecution();
    }
}
| tfisher1226/ARIES | bookshop2/bookshop2-supplier/bookshop2-supplier-service/src/test/java/bookshop2/supplier/incoming/queryBooks/QueryBooksHandlerUnitTest.java | Java | apache-2.0 | 5,692 |
<?php
/**
* Created by IntelliJ IDEA.
* User: swarm
* Date: 14.10.15
* Time: 10:59
*/
namespace Shelly;
class Palette
{
    // NOTE(review): constant name contains a typo ("GOLOR"); kept as-is since
    // external code may reference Palette::MSG_GOLOR_NOT_EXISTS.
    const MSG_GOLOR_NOT_EXISTS = 'Color does not exists';

    /**
     * Foreground ANSI colour codes keyed by alias.
     *
     * @var array
     */
    protected static $colors = [
        'black'   => 30,
        'red'     => 31,
        'green'   => 32,
        'yellow'  => 33,
        'blue'    => 34,
        'magenta' => 35,
        'cyan'    => 36,
        'white'   => 37,
        'normal'  => 0
    ];

    /**
     * Background ANSI colour codes keyed by alias.
     *
     * @var array
     */
    protected static $bgColors = [
        'black'   => 40,
        'red'     => 41,
        'green'   => 42,
        'yellow'  => 43,
        'blue'    => 44,
        'magenta' => 45,
        'cyan'    => 46,
        'white'   => 47,
    ];

    /**
     * Look up a foreground colour code.
     *
     * @param string $alias
     * @return int|false the ANSI code, or false when the alias is unknown
     */
    public function getColorByAlias($alias) {
        if (array_key_exists($alias, self::$colors)) {
            return self::$colors[$alias];
        }
        return false;
    }

    /**
     * Look up a background colour code.
     *
     * @param string $alias
     * @return int|false the ANSI code, or false when the alias is unknown
     */
    public function getBgColorByAlias($alias) {
        if (array_key_exists($alias, self::$bgColors)) {
            return self::$bgColors[$alias];
        }
        return false;
    }

    /**
     * Return the ANSI (SGR) escape sequence for a unix shell, based on the
     * passed arguments.
     *
     * @param string $fg foreground alias; 'normal' maps to code 0
     * @param string|null $bg optional background alias
     * @param bool|null $bold whether to include the bold attribute
     * @return string
     * @throws \Exception when a colour alias does not exist
     */
    public function printColourStamp($fg = 'normal', $bg = null, $bold = null)
    {
        if (!empty($bg) && (false === ($bg = $this->getBgColorByAlias($bg)))) {
            throw new \Exception(self::MSG_GOLOR_NOT_EXISTS);
        }

        // Strict comparison matters here: 'normal' resolves to 0, which is
        // falsy but is a valid code and must not be treated as a failure.
        if (false === ($fg = $this->getColorByAlias($fg))) {
            throw new \Exception(self::MSG_GOLOR_NOT_EXISTS);
        }

        $return = '';
        if (!empty($bold) && (bool)$bold) {
            $return .= '1;';
        }
        if ($bg) {
            $return .= $bg . ';';
        }
        $return .= $fg;

        return "\033[{$return}m";
    }
}
} | greezeek/shelly | src/Shelly/Palette.php | PHP | apache-2.0 | 2,125 |
/*
* base/logger.cc
* -------------------------------------------------------------------------
* Definitions for s3::base::logger static members and init() method.
* -------------------------------------------------------------------------
*
* Copyright (c) 2012, Tarick Bedeir.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include "logger.h"
using s3::base::logger;
// Log all messages unless instructed otherwise: with the cap at INT_MAX no
// level is ever filtered out until init() lowers it.
int logger::s_max_level = INT_MAX;

// Initialize syslog-backed logging.
// max_level: highest level value that will be emitted (see logger.h).
void logger::init(int max_level)
{
  s_max_level = max_level;

  // PACKAGE_NAME is presumably the autoconf-generated package macro pulled in
  // via logger.h -- verify if the build system changes.
  openlog(PACKAGE_NAME, 0, 0);
}
| mettacrawler/gcsfs | src/base/logger.cc | C++ | apache-2.0 | 1,097 |
module Chronic
module Handlers
module_function
# Handle month/day
def handle_m_d(month, day, time_tokens, options)
month.start = self.now
span = month.this(options[:context])
year, month = span.begin.year, span.begin.month
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
end
# Handle repeater-month-name/scalar-day
def handle_rmn_sd(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName)
day = tokens[1].get_tag(ScalarDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
# Handle repeater-month-name/scalar-day with separator-on
def handle_rmn_sd_on(tokens, options)
if tokens.size > 3
month = tokens[2].get_tag(RepeaterMonthName)
day = tokens[3].get_tag(ScalarDay).type
token_range = 0..1
else
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(ScalarDay).type
token_range = 0..0
end
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[token_range], options)
end
# Handle repeater-month-name/ordinal-day
def handle_rmn_od(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName)
day = tokens[1].get_tag(OrdinalDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
# Handle ordinal this month
def handle_od_rm(tokens, options)
day = tokens[0].get_tag(OrdinalDay).type
month = tokens[2].get_tag(RepeaterMonth)
handle_m_d(month, day, tokens[3..tokens.size], options)
end
# Handle ordinal-day/repeater-month-name
def handle_od_rmn(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[0].get_tag(OrdinalDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
def handle_sy_rmn_od(tokens, options)
year = tokens[0].get_tag(ScalarYear).type
month = tokens[1].get_tag(RepeaterMonthName).index
day = tokens[2].get_tag(OrdinalDay).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/repeater-month-name
def handle_sd_rmn(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[0].get_tag(ScalarDay).type
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[2..tokens.size], options)
end
# Handle repeater-month-name/ordinal-day with separator-on
def handle_rmn_od_on(tokens, options)
if tokens.size > 3
month = tokens[2].get_tag(RepeaterMonthName)
day = tokens[3].get_tag(OrdinalDay).type
token_range = 0..1
else
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(OrdinalDay).type
token_range = 0..0
end
return if month_overflow?(self.now.year, month.index, day)
handle_m_d(month, day, tokens[token_range], options)
end
# Handle repeater-month-name/scalar-year
def handle_rmn_sy(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName).index
year = tokens[1].get_tag(ScalarYear).type
if month == 12
next_month_year = year + 1
next_month_month = 1
else
next_month_year = year
next_month_month = month + 1
end
begin
end_time = Chronic.time_class.local(next_month_year, next_month_month)
Span.new(Chronic.time_class.local(year, month), end_time)
rescue ArgumentError
nil
end
end
# Handle generic timestamp (ruby 1.8)
def handle_generic(tokens, options)
t = Chronic.time_class.parse(options[:text])
Span.new(t, t + 1)
end
# Handle repeater-month-name/scalar-day/scalar-year
def handle_rmn_sd_sy(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName).index
day = tokens[1].get_tag(ScalarDay).type
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle repeater-month-name/ordinal-day/scalar-year
def handle_rmn_od_sy(tokens, options)
month = tokens[0].get_tag(RepeaterMonthName).index
day = tokens[1].get_tag(OrdinalDay).type
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle oridinal-day/repeater-month-name/scalar-year
def handle_od_rmn_sy(tokens, options)
day = tokens[0].get_tag(OrdinalDay).type
month = tokens[1].get_tag(RepeaterMonthName).index
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/repeater-month-name/scalar-year
def handle_sd_rmn_sy(tokens, options)
new_tokens = [tokens[1], tokens[0], tokens[2]]
time_tokens = tokens.last(tokens.size - 3)
handle_rmn_sd_sy(new_tokens + time_tokens, options)
end
# Handle scalar-month/scalar-day/scalar-year (endian middle)
def handle_sm_sd_sy(tokens, options)
month = tokens[0].get_tag(ScalarMonth).type
day = tokens[1].get_tag(ScalarDay).type
year = tokens[2].get_tag(ScalarYear).type
time_tokens = tokens.last(tokens.size - 3)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/scalar-month/scalar-year (endian little)
def handle_sd_sm_sy(tokens, options)
new_tokens = [tokens[1], tokens[0], tokens[2]]
time_tokens = tokens.last(tokens.size - 3)
handle_sm_sd_sy(new_tokens + time_tokens, options)
end
# Handle scalar-year/scalar-month/scalar-day
def handle_sy_sm_sd(tokens, options)
new_tokens = [tokens[1], tokens[2], tokens[0]]
time_tokens = tokens.last(tokens.size - 3)
handle_sm_sd_sy(new_tokens + time_tokens, options)
end
# Handle scalar-month/scalar-day
def handle_sm_sd(tokens, options)
month = tokens[0].get_tag(ScalarMonth).type
day = tokens[1].get_tag(ScalarDay).type
year = self.now.year
time_tokens = tokens.last(tokens.size - 2)
return if month_overflow?(year, month, day)
begin
day_start = Chronic.time_class.local(year, month, day)
day_start = Chronic.time_class.local(year + 1, month, day) if options[:context] == :future && day_start < now
day_or_time(day_start, time_tokens, options)
rescue ArgumentError
nil
end
end
# Handle scalar-day/scalar-month
def handle_sd_sm(tokens, options)
new_tokens = [tokens[1], tokens[0]]
time_tokens = tokens.last(tokens.size - 2)
handle_sm_sd(new_tokens + time_tokens, options)
end
def handle_year_and_month(year, month)
if month == 12
next_month_year = year + 1
next_month_month = 1
else
next_month_year = year
next_month_month = month + 1
end
begin
end_time = Chronic.time_class.local(next_month_year, next_month_month)
Span.new(Chronic.time_class.local(year, month), end_time)
rescue ArgumentError
nil
end
end
# Handle scalar-month/scalar-year
def handle_sm_sy(tokens, options)
month = tokens[0].get_tag(ScalarMonth).type
year = tokens[1].get_tag(ScalarYear).type
handle_year_and_month(year, month)
end
# Handle scalar-year/scalar-month
def handle_sy_sm(tokens, options)
year = tokens[0].get_tag(ScalarYear).type
month = tokens[1].get_tag(ScalarMonth).type
handle_year_and_month(year, month)
end
# Handle RepeaterDayName RepeaterMonthName OrdinalDay
def handle_rdn_rmn_od(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(OrdinalDay).type
time_tokens = tokens.last(tokens.size - 3)
year = self.now.year
return if month_overflow?(year, month.index, day)
begin
if time_tokens.empty?
start_time = Chronic.time_class.local(year, month.index, day)
end_time = time_with_rollover(year, month.index, day + 1)
Span.new(start_time, end_time)
else
day_start = Chronic.time_class.local(year, month.index, day)
day_or_time(day_start, time_tokens, options)
end
rescue ArgumentError
nil
end
end
# Handle RepeaterDayName OrdinalDay
def handle_rdn_od(tokens, options)
day = tokens[1].get_tag(OrdinalDay).type
time_tokens = tokens.last(tokens.size - 2)
year = self.now.year
month = self.now.month
if options[:context] == :future
self.now.day > day ? month += 1 : month
end
return if month_overflow?(year, month, day)
begin
if time_tokens.empty?
start_time = Chronic.time_class.local(year, month, day)
end_time = time_with_rollover(year, month, day + 1)
Span.new(start_time, end_time)
else
day_start = Chronic.time_class.local(year, month, day)
day_or_time(day_start, time_tokens, options)
end
rescue ArgumentError
nil
end
end
# Handle RepeaterDayName RepeaterMonthName ScalarDay
def handle_rdn_rmn_sd(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(ScalarDay).type
time_tokens = tokens.last(tokens.size - 3)
year = self.now.year
return if month_overflow?(year, month.index, day)
begin
if time_tokens.empty?
start_time = Chronic.time_class.local(year, month.index, day)
end_time = time_with_rollover(year, month.index, day + 1)
Span.new(start_time, end_time)
else
day_start = Chronic.time_class.local(year, month.index, day)
day_or_time(day_start, time_tokens, options)
end
rescue ArgumentError
nil
end
end
# Handle RepeaterDayName RepeaterMonthName ScalarDay ScalarYear
def handle_rdn_rmn_sd_sy(tokens, options)
month = tokens[1].get_tag(RepeaterMonthName)
day = tokens[2].get_tag(ScalarDay).type
year = tokens[3].get_tag(ScalarYear).type
return if month_overflow?(year, month.index, day)
begin
start_time = Chronic.time_class.local(year, month.index, day)
end_time = time_with_rollover(year, month.index, day + 1)
Span.new(start_time, end_time)
rescue ArgumentError
nil
end
end
def handle_sm_rmn_sy(tokens, options)
day = tokens[0].get_tag(ScalarDay).type
month = tokens[1].get_tag(RepeaterMonthName).index
year = tokens[2].get_tag(ScalarYear).type
if tokens.size > 3
time = get_anchor([tokens.last], options).begin
h, m, s = time.hour, time.min, time.sec
time = Chronic.time_class.local(year, month, day, h, m, s)
end_time = Chronic.time_class.local(year, month, day + 1, h, m, s)
else
time = Chronic.time_class.local(year, month, day)
day += 1 unless day >= 31
end_time = Chronic.time_class.local(year, month, day)
end
Span.new(time, end_time)
end
# anchors
# Handle repeaters
def handle_r(tokens, options)
dd_tokens = dealias_and_disambiguate_times(tokens, options)
get_anchor(dd_tokens, options)
end
# Handle repeater/grabber/repeater
def handle_r_g_r(tokens, options)
new_tokens = [tokens[1], tokens[0], tokens[2]]
handle_r(new_tokens, options)
end
# arrows
# Handle scalar/repeater/pointer helper
def handle_srp(tokens, span, options)
distance = tokens[0].get_tag(Scalar).type
repeater = tokens[1].get_tag(Repeater)
pointer = tokens[2].get_tag(Pointer).type
repeater.offset(span, distance, pointer) if repeater.respond_to?(:offset)
end
# Handle scalar/repeater/pointer
def handle_s_r_p(tokens, options)
span = Span.new(self.now, self.now + 1)
handle_srp(tokens, span, options)
end
# Handle pointer/scalar/repeater
def handle_p_s_r(tokens, options)
new_tokens = [tokens[1], tokens[2], tokens[0]]
handle_s_r_p(new_tokens, options)
end
# Handle scalar/repeater/pointer/anchor
def handle_s_r_p_a(tokens, options)
anchor_span = get_anchor(tokens[3..tokens.size - 1], options)
handle_srp(tokens, anchor_span, options)
end
def handle_s_r_a_s_r_p_a(tokens, options)
anchor_span = get_anchor(tokens[4..tokens.size - 1], options)
span = handle_srp(tokens[0..1]+tokens[4..6], anchor_span, options)
handle_srp(tokens[2..3]+tokens[4..6], span, options)
end
# narrows
# Handle oridinal repeaters
def handle_orr(tokens, outer_span, options)
repeater = tokens[1].get_tag(Repeater)
repeater.start = outer_span.begin - 1
ordinal = tokens[0].get_tag(Ordinal).type
span = nil
ordinal.times do
span = repeater.next(:future)
if span.begin >= outer_span.end
span = nil
break
end
end
span
end
# Handle ordinal/repeater/separator/repeater
def handle_o_r_s_r(tokens, options)
outer_span = get_anchor([tokens[3]], options)
handle_orr(tokens[0..1], outer_span, options)
end
# Handle ordinal/repeater/grabber/repeater
def handle_o_r_g_r(tokens, options)
outer_span = get_anchor(tokens[2..3], options)
handle_orr(tokens[0..1], outer_span, options)
end
# support methods
def day_or_time(day_start, time_tokens, options)
outer_span = Span.new(day_start, day_start + (24 * 60 * 60))
if !time_tokens.empty?
self.now = outer_span.begin
get_anchor(dealias_and_disambiguate_times(time_tokens, options), options)
else
outer_span
end
end
def get_anchor(tokens, options)
grabber = Grabber.new(:this)
pointer = :future
repeaters = get_repeaters(tokens)
repeaters.size.times { tokens.pop }
if tokens.first && tokens.first.get_tag(Grabber)
grabber = tokens.shift.get_tag(Grabber)
end
head = repeaters.shift
head.start = self.now
case grabber.type
when :last
outer_span = head.next(:past)
when :this
if options[:context] != :past and repeaters.size > 0
outer_span = head.this(:none)
else
outer_span = head.this(options[:context])
end
when :next
outer_span = head.next(:future)
else
raise "Invalid grabber"
end
if Chronic.debug
puts "Handler-class: #{head.class}"
puts "--#{outer_span}"
end
find_within(repeaters, outer_span, pointer)
end
def get_repeaters(tokens)
tokens.map { |token| token.get_tag(Repeater) }.compact.sort.reverse
end
# True when `day` exceeds the number of days in `month` of `year`.
# Invalid years that make Date.leap? raise are treated as "no overflow".
def month_overflow?(year, month, day)
  table = Date.leap?(year) ? RepeaterMonth::MONTH_DAYS_LEAP : RepeaterMonth::MONTH_DAYS
  day > table[month - 1]
rescue ArgumentError
  false
end
# Recursively finds repeaters within other repeaters.
# Returns a Span representing the innermost time span
# or nil if no repeater union could be found
# Recursively finds repeaters within other repeaters.
# Returns a Span representing the innermost time span, or nil if no
# repeater union could be found. Note: `tags` is consumed via #shift.
def find_within(tags, span, pointer)
  puts "--#{span}" if Chronic.debug
  return span if tags.empty?
  head = tags.shift
  head.start = pointer == :future ? span.begin : span.end
  inner = head.this(:none)
  return unless span.cover?(inner.begin) || span.cover?(inner.end)
  find_within(tags, inner, pointer)
end
# Builds a local time for year/month/day, rolling an overflowing day
# forward to the first of the next month (or next year for December).
def time_with_rollover(year, month, day)
  parts = [year, month, day]
  if month_overflow?(year, month, day)
    parts = month == 12 ? [year + 1, 1, 1] : [year, month + 1, 1]
  end
  Chronic.time_class.local(*parts)
end
# Normalises time-of-day information on the token stream before anchoring:
#
# * rewrites day-portion aliases to am/pm ("5:00 in the morning" -> 5:00 am,
#   "7:00 in the evening" -> 7:00 pm), and
# * when options[:ambiguous_time_range] is not :none, inserts a synthetic
#   "disambiguator" token tagged with that day portion after every ambiguous
#   RepeaterTime not already followed by a day portion.
#
# Returns the (possibly rebuilt) token Array; the original tokens may have
# their tags mutated in place.
def dealias_and_disambiguate_times(tokens, options)
  # handle aliases of am/pm
  # 5:00 in the morning -> 5:00 am
  # 7:00 in the evening -> 7:00 pm
  day_portion_index = nil
  tokens.each_with_index do |t, i|
    if t.get_tag(RepeaterDayPortion)
      day_portion_index = i
      break
    end
  end
  time_index = nil
  tokens.each_with_index do |t, i|
    if t.get_tag(RepeaterTime)
      time_index = i
      break
    end
  end
  # Only rewrite when both a time and a day portion are present.
  if day_portion_index && time_index
    t1 = tokens[day_portion_index]
    t1tag = t1.get_tag(RepeaterDayPortion)
    case t1tag.type
    when :morning
      puts '--morning->am' if Chronic.debug
      t1.untag(RepeaterDayPortion)
      t1.tag(RepeaterDayPortion.new(:am))
    when :afternoon, :evening, :night
      puts "--#{t1tag.type}->pm" if Chronic.debug
      t1.untag(RepeaterDayPortion)
      t1.tag(RepeaterDayPortion.new(:pm))
    end
  end
  # handle ambiguous times if :ambiguous_time_range is specified
  if options[:ambiguous_time_range] != :none
    ambiguous_tokens = []
    tokens.each_with_index do |token, i|
      ambiguous_tokens << token
      next_token = tokens[i + 1]
      # An ambiguous time ("5:00") with no following day portion gets an
      # explicit one injected from the configured default range.
      if token.get_tag(RepeaterTime) && token.get_tag(RepeaterTime).type.ambiguous? && (!next_token || !next_token.get_tag(RepeaterDayPortion))
        distoken = Token.new('disambiguator')
        distoken.tag(RepeaterDayPortion.new(options[:ambiguous_time_range]))
        ambiguous_tokens << distoken
      end
    end
    tokens = ambiguous_tokens
  end
  tokens
end
end
end
| kevstessens/docnetrails | vendor/bundle/gems/chronic-0.9.1/lib/chronic/handlers.rb | Ruby | apache-2.0 | 18,912 |
<?php
// seller.php - lists the products offered by a single seller, with optional
// keyword search and simple prev/next pagination.
session_start();
include_once('includes/connection.php');
include_once('includes/product.php');
include_once('includes/user.php');

$page_title = "Products from this seller";
include('includes/header.php');

$product = new Product();
$user = new user();

// Request parameters. $category_id was previously used without ever being
// initialised (undefined-variable notice, broken category filtering); the
// pagination links below pass it as "cate_id", so read it from there.
$seller_id = isset($_GET['seller']) ? $_GET['seller'] : '';
$category_id = isset($_GET['cate_id']) ? $_GET['cate_id'] : '';

// If seller id is not empty, render the seller's catalogue.
if (!empty($seller_id)) {
    $seller_data = $user->fetch_user($seller_id);
    $page_num = isset($_GET['page_num']) ? $_GET['page_num'] : 1;
    if (isset($_GET['action']) && $_GET['action'] == 'search') {
        $keywords = $_GET['search_str'];
        $data = $product->search_seller($keywords, $page_num, $seller_id);
    } else {
        $data = $product->fetch_by_category_seller($category_id, $page_num, $seller_id);
    }
?>
    <div class="box_center">
        <h3>Products from seller: </h3> <?php echo htmlspecialchars($seller_data['first_name'] . " " . $seller_data['last_name']); ?>
        <table class="show_table">
            <?php
            if (empty($data)) {
                echo "<br><br><br><p>Nothing found!</p><br><br><br>";
            }
            $MAX_COLUMN = 4;
            $column_count = 0;
            foreach ($data as $item) {
                // Fall back to the default icon when the product image is missing.
                $icon_path = "./uploads/icons/" . $item['icon'];
                if (!file_exists($icon_path) || is_dir($icon_path)) {
                    $icon_path = "./uploads/icons/" . "default.png";
                }
                if ($column_count % $MAX_COLUMN == 0) {
                    echo "<tr>";
                }
            ?>
            <td>
                <div class="item">
                    <a href="product_detail.php?id=<?php echo urlencode($item['id']); ?>"><img class="item_icon"
                              src=<?php echo $icon_path ?> alt="item
                        picture"><br><?php echo htmlspecialchars($item['name']); ?>
                    </a>
                </div>
            </td>
            <?php
                $column_count++;
                if ($column_count % $MAX_COLUMN == 0) {
                    echo "</tr>";
                }
            }
            ?>
        </table>
        <div class="page_nav">
            <?php
            // Pagination: look one page ahead to decide whether "next" applies.
            if ($page_num > 1) {
                echo "<a href='seller.php?cate_id=" . urlencode($category_id) . "&page_num=" . ($page_num - 1) . "&seller=" . urlencode($seller_id) . "'>prev </a>";
            }
            if (isset($_GET['action']) && $_GET['action'] == 'search') {
                $next_data = $product->search_seller($keywords, $page_num + 1, $seller_id);
            } else {
                $next_data = $product->fetch_by_category_seller($category_id, $page_num + 1, $seller_id);
            }
            if (count($next_data) > 0) {
                echo "<a href='seller.php?cate_id=" . urlencode($category_id) . "&page_num=" . ($page_num + 1) . "&seller=" . urlencode($seller_id) . "'>next</a>";
            }
            ?>
        </div>
    </div>
<?php
    include('includes/footer.php');
} else {
    // No seller id given - fall back to the landing page content.
    include('includes/index.php');
}
?>
| updownlife/campustore | seller.php | PHP | apache-2.0 | 3,528 |
package view;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Matrix;
import android.util.AttributeSet;
import android.util.FloatMath;
import android.view.MotionEvent;
import android.widget.ImageView;
public class ImageControl extends ImageView {
    /** Creates the control programmatically. */
    public ImageControl(Context context) {
        super(context);
    }

    /** Creates the control from an XML layout. */
    public ImageControl(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    /** Creates the control from an XML layout with a default style. */
    public ImageControl(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
    }
    Matrix imgMatrix = null; // transformation matrix applied to the image
    static final int DOUBLE_CLICK_TIME_SPACE = 300; // max ms between taps of a double-click
    static final int DOUBLE_POINT_DISTANCE = 10; // min px between two pointers for a pinch
    static final int NONE = 0; // no gesture in progress
    static final int DRAG = 1; // dragging the zoomed image
    static final int ZOOM = 2; // pinch zoom in progress
    private int mode = NONE; // current gesture mode
    float bigScale = 3f; // default zoom-in factor (may be raised in imageInit)
    Boolean isBig = false; // true while zoomed in
    long lastClickTime = 0; // time of the previous single tap (for double-click)
    float startDistance; // pointer distance when the pinch started
    float endDistance; // pointer distance during the pinch
    float topHeight; // status bar height plus title bar height
    Bitmap primaryBitmap = null; // the original bitmap being displayed
    float contentW; // width of the content area on screen
    float contentH; // height of the content area on screen
    float primaryW; // original image width
    float primaryH; // original image height
    float scale; // fit-to-screen scale factor
    Boolean isMoveX = true; // whether dragging along X is allowed
    Boolean isMoveY = true; // whether dragging along Y is allowed
    float startX; // drag start / previous X (raw screen coords)
    float startY; // drag start / previous Y (raw screen coords)
    float endX; // current drag X (raw screen coords)
    float endY; // current drag Y (raw screen coords)
    float subX; // X offset that centres the fitted image
    float subY; // Y offset that centres the fitted image
    float limitX1; // lower clamp for the matrix X translation while zoomed
    float limitX2; // upper clamp for the matrix X translation while zoomed
    float limitY1; // lower clamp for the matrix Y translation while zoomed
    float limitY2; // upper clamp for the matrix Y translation while zoomed
    ICustomMethod mCustomMethod = null; // callback notified on zoom toggles
    /**
     * Initialises the control: computes the fit-to-screen scale for the given
     * bitmap, centres it inside the content area and installs the matrix.
     *
     * @param bitmap        the bitmap to display
     * @param contentW      content area width in pixels
     * @param contentH      content area height in pixels
     * @param topHeight     combined height of the status bar and title bar
     * @param iCustomMethod callback invoked whenever the zoom state flips
     */
    public void imageInit(Bitmap bitmap, int contentW, int contentH,
            int topHeight, ICustomMethod iCustomMethod) {
        this.primaryBitmap = bitmap;
        this.contentW = contentW;
        this.contentH = contentH;
        this.topHeight = topHeight;
        mCustomMethod = iCustomMethod;
        primaryW = primaryBitmap.getWidth();
        primaryH = primaryBitmap.getHeight();
        float scaleX = (float) contentW / primaryW;
        float scaleY = (float) contentH / primaryH;
        // Use the smaller ratio so the whole image fits on screen.
        scale = scaleX < scaleY ? scaleX : scaleY;
        // For images smaller than 3x the screen, cap the zoom factor just
        // beyond 1:1 pixels so zooming stays useful.
        if (scale < 1 && 1 / scale < bigScale) {
            bigScale = (float) (1 / scale + 0.5);
        }
        imgMatrix = new Matrix();
        subX = (contentW - primaryW * scale) / 2;
        subY = (contentH - primaryH * scale) / 2;
        this.setImageBitmap(primaryBitmap);
        this.setScaleType(ScaleType.MATRIX);
        imgMatrix.postScale(scale, scale);
        imgMatrix.postTranslate(subX, subY);
        this.setImageMatrix(imgMatrix);
    }
    /**
     * Touch-down handler. A second tap within {@link #DOUBLE_CLICK_TIME_SPACE}
     * ms toggles the zoom; otherwise, when already zoomed in, a drag gesture
     * begins.
     *
     * @param event the down motion event
     */
    public void mouseDown(MotionEvent event) {
        mode = NONE;
        startX = event.getRawX();
        startY = event.getRawY();
        if (event.getPointerCount() == 1) {
            // Two taps close together in time count as a double-click.
            if (event.getEventTime() - lastClickTime < DOUBLE_CLICK_TIME_SPACE) {
                changeSize(startX, startY);
            } else if (isBig) {
                mode = DRAG;
            }
        }
        lastClickTime = event.getEventTime();
    }
    /**
     * Secondary-pointer-down handler: enters ZOOM mode when the two pointers
     * are far enough apart for a pinch to be meaningful.
     *
     * @param event the pointer-down motion event
     */
    public void mousePointDown(MotionEvent event) {
        startDistance = getDistance(event);
        if (startDistance > DOUBLE_POINT_DISTANCE) {
            mode = ZOOM;
        } else {
            mode = NONE;
        }
    }
    /**
     * Move handler: drags the zoomed image (clamped to the translation
     * limits computed in {@link #changeSize}) or interprets a pinch gesture
     * to zoom in/out.
     *
     * @param event the move motion event
     */
    public void mouseMove(MotionEvent event) {
        if ((mode == DRAG) && (isMoveX || isMoveY)) {
            float[] XY = getTranslateXY(imgMatrix);
            float transX = 0;
            float transY = 0;
            if (isMoveX) {
                endX = event.getRawX();
                transX = endX - startX;
                // Clamp so the image edge never leaves the content area.
                if ((XY[0] + transX) <= limitX1) {
                    transX = limitX1 - XY[0];
                }
                if ((XY[0] + transX) >= limitX2) {
                    transX = limitX2 - XY[0];
                }
            }
            if (isMoveY) {
                endY = event.getRawY();
                transY = endY - startY;
                if ((XY[1] + transY) <= limitY1) {
                    transY = limitY1 - XY[1];
                }
                if ((XY[1] + transY) >= limitY2) {
                    transY = limitY2 - XY[1];
                }
            }
            imgMatrix.postTranslate(transX, transY);
            startX = endX;
            startY = endY;
            this.setImageMatrix(imgMatrix);
        } else if (mode == ZOOM && event.getPointerCount() > 1) {
            endDistance = getDistance(event);
            float dif = endDistance - startDistance;
            if (Math.abs(endDistance - startDistance) > DOUBLE_POINT_DISTANCE) {
                if (isBig) {
                    // Pinch-in while zoomed: return to the fitted view.
                    if (dif < 0) {
                        changeSize(0, 0);
                        mode = NONE;
                    }
                } else if (dif > 0) {
                    // Pinch-out while fitted: zoom in around the pinch midpoint.
                    float x = event.getX(0) / 2 + event.getX(1) / 2;
                    float y = event.getY(0) / 2 + event.getY(1) / 2;
                    changeSize(x, y);
                    mode = NONE;
                }
            }
        }
    }
    /**
     * Touch-up handler: ends any drag/zoom gesture in progress.
     */
    public void mouseUp() {
        mode = NONE;
    }
    /**
     * Toggles between the fitted view and the zoomed-in view.
     *
     * When zooming in, the matrix is scaled by {@code bigScale} and translated
     * so that the tapped point stays in place, then the translation is clamped
     * so no blank margins appear; the drag limits (limitX*/limitY*) and the
     * per-axis drag permissions are updated as a side effect.
     *
     * @param x X coordinate of the zoom focus (screen coords); ignored when zooming out
     * @param y Y coordinate of the zoom focus (screen coords); ignored when zooming out
     */
    private void changeSize(float x, float y) {
        if (isBig) {
            // Currently zoomed in: restore the fitted, centred matrix.
            imgMatrix.reset();
            imgMatrix.postScale(scale, scale);
            imgMatrix.postTranslate(subX, subY);
            isBig = false;
        } else {
            imgMatrix.postScale(bigScale, bigScale); // magnify on top of the fitted matrix
            // Translate so the tapped point remains under the finger; y is
            // offset by the status/title bar height because raw coords are used.
            float transX = -((bigScale - 1) * x);
            float transY = -((bigScale - 1) * (y - topHeight));
            float currentWidth = primaryW * scale * bigScale; // zoomed image size
            float currentHeight = primaryH * scale * bigScale;
            // If the zoomed image overflows the screen vertically, allow Y
            // dragging and clamp the translation so no blank strip shows.
            if (currentHeight > contentH) {
                limitY1 = -(currentHeight - contentH); // drag clamp
                limitY2 = 0;
                isMoveY = true; // allow dragging along Y
                float currentSubY = bigScale * subY; // centring offset after scaling
                // Blank space would appear at the top of the content area.
                if (-transY < currentSubY) {
                    transY = -currentSubY;
                }
                // Blank space would appear at the bottom of the content area.
                if (currentSubY + transY < limitY1) {
                    transY = -(currentHeight + currentSubY - contentH);
                }
            } else {
                // Zoomed image still fits vertically: no Y dragging.
                isMoveY = false;
            }
            if (currentWidth > contentW) {
                limitX1 = -(currentWidth - contentW);
                limitX2 = 0;
                isMoveX = true;
                float currentSubX = bigScale * subX;
                if (-transX < currentSubX) {
                    transX = -currentSubX;
                }
                if (currentSubX + transX < limitX1) {
                    transX = -(currentWidth + currentSubX - contentW);
                }
            } else {
                isMoveX = false;
            }
            imgMatrix.postTranslate(transX, transY);
            isBig = true;
        }
        this.setImageMatrix(imgMatrix);
        if (mCustomMethod != null) {
            mCustomMethod.customMethod(isBig);
        }
    }
/**
* »ñÈ¡±ä»»¾ØÕóÖÐXÖáÆ«ÒÆÁ¿ºÍYÖáÆ«ÒÆÁ¿
*
* @param matrix
* ±ä»»¾ØÕó
* @return
*/
private float[] getTranslateXY(Matrix matrix) {
float[] values = new float[9];
matrix.getValues(values);
float[] floats = new float[2];
floats[0] = values[Matrix.MTRANS_X];
floats[1] = values[Matrix.MTRANS_Y];
return floats;
}
/**
* »ñÈ¡Á½µã¼äµÄ¾àÀë
*
* @param event
* @return
*/
private float getDistance(MotionEvent event) {
float x = event.getX(0) - event.getX(1);
float y = event.getY(0) - event.getY(1);
return FloatMath.sqrt(x * x + y * y);
}
/**
* @author Administrator Óû§×Ô¶¨Òå·½·¨
*/
public interface ICustomMethod {
public void customMethod(Boolean currentStatus);
}
} | ydc201211/VirtualCampus | src/view/ImageControl.java | Java | apache-2.0 | 7,455 |
# AUTHOR
# jan molic /mig/at/1984/dot/cz/
#
# DESCRIPTION
# Hash with preserved order and some array-like extensions
# Public domain.
#
# THANKS
# Andrew Johnson for his suggestions and fixes of Hash[],
# merge, to_a, inspect and shift
module Buildr
module OSGi
# Hash subclass that preserves insertion order (pre-1.9 Ruby hashes were
# unordered) and adds Array-like operations (push/pop/shift/unshift).
class OrderedHash < ::Hash
  # Insertion-ordered Array of keys; kept in sync by every mutator below.
  attr_accessor :order

  class << self
    # OrderedHash[hash] or OrderedHash[k1, v1, k2, v2, ...]
    def [] *args
      hsh = OrderedHash.new
      if Hash === args[0]
        hsh.replace args[0]
      elsif (args.size % 2) != 0
        raise ArgumentError, "odd number of elements for Hash"
      else
        # Consume the flat argument list pairwise.
        0.step(args.size - 1, 2) do |a|
          b = a + 1
          hsh[args[a]] = args[b]
        end
      end
      hsh
    end
  end

  def initialize(*a, &b)
    super
    @order = []
  end

  # Stores without touching @order (delegates to the tracking #store).
  def store_only a, b
    store a, b
  end

  alias orig_store store

  # Tracking store: remembers first-insertion order of each key.
  def store a, b
    @order.push a unless has_key? a
    super a, b
  end

  alias []= store

  # Equality also requires the same key order, unlike a plain Hash.
  def == hsh2
    return false if @order != hsh2.order
    super hsh2
  end

  def clear
    @order = []
    super
  end

  def delete key
    @order.delete key
    super
  end

  # Iterators yield in insertion order and return self.
  def each_key
    @order.each { |k| yield k }
    self
  end

  def each_value
    @order.each { |k| yield self[k] }
    self
  end

  def each
    @order.each { |k| yield k, self[k] }
    self
  end

  alias each_pair each

  # Iterates over a copy of @order because #delete mutates it mid-loop.
  def delete_if
    @order.clone.each { |k|
      delete k if yield(k)
    }
    self
  end

  def values
    ary = []
    @order.each { |k| ary.push self[k] }
    ary
  end

  def keys
    @order
  end

  # First/last key-value pair as a one-entry Hash.
  def first
    {@order.first => self[@order.first]}
  end

  def last
    {@order.last => self[@order.last]}
  end

  # Returns a plain (unordered) Hash mapping values back to keys.
  def invert
    hsh2 = Hash.new
    @order.each { |k| hsh2[self[k]] = k }
    hsh2
  end

  def reject &block
    self.dup.delete_if &block
  end

  # NOTE: unlike Hash#reject!, this does not mutate the receiver; it
  # returns the filtered copy, or nil when nothing was rejected.
  def reject! &block
    hsh2 = reject &block
    self == hsh2 ? nil : hsh2
  end

  def replace hsh2
    @order = hsh2.keys
    super hsh2
  end

  # Removes and returns the oldest [key, value] pair.
  def shift
    key = @order.first
    key ? [key, delete(key)] : super
  end

  # Prepends a pair; returns false (and does nothing) when the key exists.
  def unshift k, v
    unless self.include? k
      @order.unshift k
      orig_store(k, v)
      true
    else
      false
    end
  end

  # Appends a pair; returns false (and does nothing) when the key exists.
  def push k, v
    unless self.include? k
      @order.push k
      orig_store(k, v)
      true
    else
      false
    end
  end

  # Removes and returns the newest [key, value] pair, or nil when empty.
  def pop
    key = @order.last
    key ? [key, delete(key)] : nil
  end

  def to_a
    ary = []
    each { |k, v| ary << [k, v] }
    ary
  end

  def to_s
    self.to_a.to_s
  end

  def inspect
    ary = []
    each {|k, v| ary << k.inspect + "=>" + v.inspect}
    '{' + ary.join(", ") + '}'
  end

  def update hsh2
    hsh2.each { |k, v| self[k] = v }
    self
  end

  alias :merge! update

  def merge hsh2
    self.dup.update(hsh2)
  end

  # Returns matching [key, value] pairs as an Array (Hash#select semantics
  # of 1.8-era Ruby).
  def select
    ary = []
    each { |k, v| ary << [k, v] if yield k, v }
    ary
  end

  # Masquerades as Hash (e.g. so YAML emits a plain mapping); use
  # #__class__ to obtain the real class.
  def class
    Hash
  end

  def __class__
    OrderedHash
  end

  attr_accessor "to_yaml_style"

  # Switches YAML emission to inline style. On YAML engines without
  # to_yaml_style support it installs a per-object #to_yaml that emits
  # "{ k: v, ... }" manually via YAML::quick_emit.
  def yaml_inline= bool
    if respond_to?("to_yaml_style")
      self.to_yaml_style = :inline
    else
      unless defined? @__yaml_inline_meth
        @__yaml_inline_meth =
          lambda {|opts|
            YAML::quick_emit(object_id, opts) {|emitter|
              emitter << '{ ' << map{|kv| kv.join ': '}.join(', ') << ' }'
            }
          }
        class << self
          def to_yaml opts = {}
            begin
              @__yaml_inline ? @__yaml_inline_meth[ opts ] : super
            rescue
              @to_yaml_style = :inline
              super
            end
          end
        end
      end
    end
    @__yaml_inline = bool
  end

  def yaml_inline!()
    self.yaml_inline = true
  end

  # Yields key, value and the key's insertion index.
  def each_with_index
    @order.each_with_index { |k, index| yield k, self[k], index }
    self
  end
end # class OrderedHash
end
end | realityforge/buildr-osgi-assembler | lib/buildr/osgi/ordered_hash.rb | Ruby | apache-2.0 | 4,530 |
// This is a generated file. Not intended for manual editing.
package com.github.joshholl.intellij.csharp.lang.psi;
import java.util.List;
import org.jetbrains.annotations.*;
import com.intellij.psi.PsiElement;
/**
 * PSI tree element representing a C# {@code this} access expression.
 * This interface is generated (see the file header); regenerate it from the
 * grammar instead of editing it by hand.
 */
public interface CSharpThisAccess extends PsiElement {
}
| joshholl/intellij-csharp | gen/com/github/joshholl/intellij/csharp/lang/psi/CSharpThisAccess.java | Java | apache-2.0 | 271 |
package com.linkedin.common.util;
import com.datahub.test.testing.AspectBar;
import com.datahub.test.testing.AspectBaz;
import com.datahub.test.testing.AspectFoo;
import com.datahub.test.testing.AspectFooArray;
import com.datahub.test.testing.AspectInvalid;
import com.datahub.test.testing.EntitySnapshot;
import com.datahub.test.testing.EntityValueArray;
import com.datahub.test.testing.MixedRecord;
import com.datahub.test.testing.StringUnion;
import com.datahub.test.testing.StringUnionArray;
import com.datahub.test.testing.singleaspectentity.EntityValue;
import com.datahub.test.testing.urn.FooUrn;
import com.datahub.util.ModelUtils;
import com.datahub.util.RecordUtils;
import com.datahub.util.exception.InvalidSchemaException;
import com.datahub.util.exception.ModelConversionException;
import com.datahub.util.validator.ValidationUtils;
import com.linkedin.common.urn.Urn;
import com.linkedin.data.schema.PathSpec;
import com.linkedin.data.schema.RecordDataSchema;
import com.linkedin.data.template.RecordTemplate;
import com.linkedin.data.template.StringArray;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Optional;
import org.apache.commons.io.IOUtils;
import org.testng.annotations.Test;
import static com.datahub.utils.TestUtils.*;
import static org.testng.Assert.*;
/**
 * Unit tests for {@code RecordUtils}: JSON round-tripping of record
 * templates, schema/field access, union selection, and PathSpec-based value
 * extraction over the generated test models (AspectFoo/Bar/Baz, MixedRecord,
 * EntitySnapshot, ...).
 */
public class RecordUtilsTest {

  @Test
  public void testToJsonString() throws IOException {
    AspectFoo foo = new AspectFoo().setValue("foo");
    // Normalise the fixture so whitespace differences don't fail the test.
    String expected =
        loadJsonFromResource("foo.json").replaceAll("\\s+", "").replaceAll("\\n", "").replaceAll("\\r", "");
    String actual = RecordUtils.toJsonString(foo);
    assertEquals(actual, expected);
  }

  @Test
  public void testToRecordTemplate() throws IOException {
    AspectFoo expected = new AspectFoo().setValue("foo");
    String jsonString = loadJsonFromResource("foo.json");
    AspectFoo actual = RecordUtils.toRecordTemplate(AspectFoo.class, jsonString);
    assertEquals(actual, expected);

    // The class-name + DataMap overload must produce an equal instance.
    RecordTemplate actual2 = RecordUtils.toRecordTemplate(AspectFoo.class.getCanonicalName(), expected.data());
    assertEquals(actual2.getClass(), AspectFoo.class);
    assertEquals(actual2, expected);
  }

  @Test(expectedExceptions = ModelConversionException.class)
  public void testToRecordTemplateFromInvalidString() {
    RecordUtils.toRecordTemplate(AspectFoo.class, "invalid_json");
  }

  @Test
  public void testGetValidRecordDataSchemaField() {
    RecordDataSchema schema = ValidationUtils.getRecordSchema(AspectFoo.class);
    RecordDataSchema.Field expected = schema.getField("value");
    assertEquals(RecordUtils.getRecordDataSchemaField(new AspectFoo().setValue("foo"), "value"), expected);
  }

  @Test(expectedExceptions = InvalidSchemaException.class)
  public void testGetInvalidRecordDataSchemaField() {
    RecordUtils.getRecordDataSchemaField(new AspectFoo().setValue("foo"), "non-existing-field");
  }

  @Test
  public void testSetRecordTemplatePrimitiveField() {
    AspectBaz baz = new AspectBaz();

    RecordUtils.setRecordTemplatePrimitiveField(baz, "boolField", Boolean.FALSE);
    RecordUtils.setRecordTemplatePrimitiveField(baz, "stringField", "baz");
    RecordUtils.setRecordTemplatePrimitiveField(baz, "longField", Long.valueOf(1234L));

    assertFalse(baz.isBoolField());
    assertEquals(baz.getStringField(), "baz");
    assertEquals(baz.getLongField(), Long.valueOf(1234L));
  }

  @Test
  public void testSetRecordTemplateComplexField() throws IOException {
    AspectBaz baz = new AspectBaz();

    StringArray stringArray = new StringArray(Arrays.asList("1", "2", "3"));
    RecordUtils.setRecordTemplateComplexField(baz, "arrayField", stringArray);

    AspectFoo foo = new AspectFoo().setValue("foo");
    RecordUtils.setRecordTemplateComplexField(baz, "recordField", foo);

    assertEquals(baz.getArrayField(), stringArray);
    assertEquals(baz.getRecordField(), foo);
  }

  @Test
  public void testGetRecordTemplatePrimitiveField() throws IOException {
    AspectBaz baz = loadAspectBaz("baz.json");

    assertTrue(RecordUtils.getRecordTemplateField(baz, "boolField", Boolean.class));
    assertEquals(RecordUtils.getRecordTemplateField(baz, "stringField", String.class), "baz");
    assertEquals(RecordUtils.getRecordTemplateField(baz, "longField", Long.class), Long.valueOf(1234L));
  }

  @Test
  public void testGetRecordTemplateUrnField() {
    Urn urn = makeUrn(1);
    EntitySnapshot snapshot = new EntitySnapshot().setUrn(urn);

    assertEquals(RecordUtils.getRecordTemplateField(snapshot, "urn", Urn.class), urn);
  }

  @Test
  public void testGetRecordTemplateWrappedField() throws IOException {
    AspectBaz baz = loadAspectBaz("baz.json");

    StringArray stringArray = RecordUtils.getRecordTemplateWrappedField(baz, "arrayField", StringArray.class);

    assertEquals(stringArray.toArray(), new String[]{"1", "2", "3"});
  }

  @Test
  public void testGetSelectedRecordTemplateFromUnion() throws IOException {
    AspectBaz baz = new AspectBaz();
    baz.setUnionField(new AspectBaz.UnionField());
    baz.getUnionField().setAspectFoo(new AspectFoo().setValue("foo"));

    RecordTemplate selected = RecordUtils.getSelectedRecordTemplateFromUnion(baz.getUnionField());

    assertEquals(selected.getClass(), AspectFoo.class);
  }

  @Test
  public void testSetSelectedRecordTemplateInUnion() throws IOException {
    AspectBaz baz = new AspectBaz();
    baz.setUnionField(new AspectBaz.UnionField());
    AspectFoo expected = new AspectFoo().setValue("foo");

    RecordUtils.setSelectedRecordTemplateInUnion(baz.getUnionField(), expected);

    assertEquals(baz.getUnionField().getAspectFoo(), expected);
  }

  @Test
  public void testGetValidMetadataSnapshotClassFromName() {
    Class<? extends RecordTemplate> actualClass =
        ModelUtils.getMetadataSnapshotClassFromName(EntitySnapshot.class.getCanonicalName());
    assertEquals(actualClass, EntitySnapshot.class);
  }

  @Test(expectedExceptions = InvalidSchemaException.class)
  public void testGetInvalidMetadataSnapshotClassFromName() {
    ModelUtils.getMetadataSnapshotClassFromName(AspectInvalid.class.getCanonicalName());
  }

  @Test
  public void testExtractAspectFromSingleAspectEntity() {
    String field1 = "foo";
    EntityValue value = new EntityValue();
    value.setValue(field1);

    AspectBar aspect = new AspectBar();
    aspect.setValue(field1);

    assertEquals(RecordUtils.extractAspectFromSingleAspectEntity(value, AspectBar.class), aspect);
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has primitive fields")
  public void testGetFieldValuePrimitive() {

    // case 1: string field set, bool field isn't set, default field should return default value
    final MixedRecord mixedRecord1 = new MixedRecord().setValue("fooVal1");

    PathSpec ps1 = MixedRecord.fields().value();
    PathSpec ps2 = MixedRecord.fields().flag();
    PathSpec ps3 = MixedRecord.fields().defaultField();

    Optional<Object> o1 = RecordUtils.getFieldValue(mixedRecord1, ps1);
    Optional<Object> o2 = RecordUtils.getFieldValue(mixedRecord1, ps2);
    Optional<Object> o3 = RecordUtils.getFieldValue(mixedRecord1, ps3);

    assertEquals(o1.get(), "fooVal1");
    assertFalse(o2.isPresent());
    assertEquals(o3.get(), "defaultVal");

    assertEquals(ps1.toString(), "/value");
    assertEquals(ps2.toString(), "/flag");
    assertEquals(ps3.toString(), "/defaultField");

    // case 2: string and bool field both set
    final MixedRecord mixedRecord2 = new MixedRecord().setValue("fooVal2").setFlag(true);

    Object o4 = RecordUtils.getFieldValue(mixedRecord2, MixedRecord.fields().value()).get();
    Object o5 = RecordUtils.getFieldValue(mixedRecord2, MixedRecord.fields().flag()).get();

    assertEquals(o4, "fooVal2");
    assertEquals(o5, true);

    // case 3: similar to case1, just that pegasus path as string is used as input
    Object o6 = RecordUtils.getFieldValue(mixedRecord1, "/value");
    assertEquals(o6, o1);
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has TypeRef field")
  public void testGetFieldValueTypeRef() {
    // case 1: Urn as the TypeRef
    FooUrn urn = makeFooUrn(1);
    final MixedRecord mixedRecord1 = new MixedRecord().setFooUrn(urn);
    PathSpec ps1 = MixedRecord.fields().fooUrn();
    Object o1 = RecordUtils.getFieldValue(mixedRecord1, ps1).get();

    assertEquals(o1, urn);
    assertEquals(ps1.toString(), "/fooUrn");

    // case 2: TypeRef defined in the same pdl
    final MixedRecord mixedRecord2 = new MixedRecord().setIntTypeRef(2);
    PathSpec ps2 = MixedRecord.fields().intTypeRef();
    Object o2 = RecordUtils.getFieldValue(mixedRecord2, ps2).get();

    assertEquals(o2, 2);
    assertEquals(ps2.toString(), "/intTypeRef");

    // case 3: TypeRef for Record field reference
    AspectFoo aspectFoo = new AspectFoo().setValue("fooVal");
    PathSpec ps3 = MixedRecord.fields().recordTypeRef().value();
    final MixedRecord mixedRecord3 = new MixedRecord().setRecordTypeRef(aspectFoo);
    Object o3 = RecordUtils.getFieldValue(mixedRecord3, ps3).get();

    assertEquals(o3, "fooVal");
    assertEquals(ps3.toString(), "/recordTypeRef/value");
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has another field of Record type")
  public void testGetFieldValueRecordType() {

    // case 1: referencing a field inside a RecordTemplate, one level deep.
    AspectFoo foo1 = new AspectFoo().setValue("fooVal1");
    MixedRecord mixedRecord1 = new MixedRecord().setRecordField(foo1);

    PathSpec ps1f1 = MixedRecord.fields().recordField().value();
    PathSpec ps1f2 =
        MixedRecord.fields().nestedRecordField().foo().value(); // referencing a nullable record template field

    Optional<Object> o1f1 = RecordUtils.getFieldValue(mixedRecord1, ps1f1);
    Optional<Object> o1f2 = RecordUtils.getFieldValue(mixedRecord1, ps1f2);

    assertEquals(o1f1.get(), "fooVal1");
    assertEquals(ps1f1.toString(), "/recordField/value");
    assertFalse(o1f2.isPresent());
    assertEquals(ps1f2.toString(), "/nestedRecordField/foo/value");

    // case 2: referencing a field inside a RecordTemplate, two levels deep i.e. nested field
    AspectFoo foo2 = new AspectFoo().setValue("fooVal2");
    com.datahub.test.testing.EntityValue entityValue = new com.datahub.test.testing.EntityValue().setFoo(foo2);
    MixedRecord mixedRecord2 = new MixedRecord().setNestedRecordField(entityValue);

    PathSpec ps2 = MixedRecord.fields().nestedRecordField().foo().value();
    Object o2 = RecordUtils.getFieldValue(mixedRecord2, ps2).get();

    assertEquals(o2, "fooVal2");
    assertEquals(ps2.toString(), "/nestedRecordField/foo/value");
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has field of type array")
  public void testGetFieldValueArray() {

    // case 1: array of strings
    final MixedRecord mixedRecord1 =
        new MixedRecord().setStringArray(new StringArray(Arrays.asList("val1", "val2", "val3", "val4")));
    PathSpec ps1 = MixedRecord.fields().stringArray();
    Object o1 = RecordUtils.getFieldValue(mixedRecord1, ps1).get();

    assertEquals(o1, new StringArray(Arrays.asList("val1", "val2", "val3", "val4")));
    assertEquals(ps1.toString(), "/stringArray");

    // case 2: wildcard on array of records
    AspectFoo aspectFoo1 = new AspectFoo().setValue("fooVal1");
    AspectFoo aspectFoo2 = new AspectFoo().setValue("fooVal2");
    AspectFoo aspectFoo3 = new AspectFoo().setValue("fooVal3");
    AspectFoo aspectFoo4 = new AspectFoo().setValue("fooVal4");
    final AspectFooArray aspectFooArray =
        new AspectFooArray(Arrays.asList(aspectFoo1, aspectFoo2, aspectFoo3, aspectFoo4));
    final MixedRecord mixedRecord2 = new MixedRecord().setRecordArray(aspectFooArray);

    PathSpec ps2 = MixedRecord.fields().recordArray().items().value();
    Object o2 = RecordUtils.getFieldValue(mixedRecord2, ps2).get();

    assertEquals(o2, new StringArray(Arrays.asList("fooVal1", "fooVal2", "fooVal3", "fooVal4")));
    assertEquals(ps2.toString(), "/recordArray/*/value");

    // case 3: array of records is empty
    final MixedRecord mixedRecord3 = new MixedRecord().setRecordArray(new AspectFooArray());
    Object o3 = RecordUtils.getFieldValue(mixedRecord3, MixedRecord.fields().recordArray().items().value()).get();
    assertEquals(o3, new StringArray());

    // case 4: referencing an index of array is not supported
    final MixedRecord mixedRecord4 = new MixedRecord().setRecordArray(aspectFooArray);
    assertThrows(UnsupportedOperationException.class,
        () -> RecordUtils.getFieldValue(mixedRecord4, "/recordArray/0/value"));

    // case 5: referencing nested field inside array of records, field being 2 levels deep
    AspectFoo f1 = new AspectFoo().setValue("val1");
    AspectFoo f2 = new AspectFoo().setValue("val2");
    com.datahub.test.testing.EntityValue val1 = new com.datahub.test.testing.EntityValue().setFoo(f1);
    com.datahub.test.testing.EntityValue val2 = new com.datahub.test.testing.EntityValue().setFoo(f2);
    EntityValueArray entityValues = new EntityValueArray(Arrays.asList(val1, val2));
    final MixedRecord mixedRecord5 = new MixedRecord().setNestedRecordArray(entityValues);

    PathSpec psFoo5 = MixedRecord.fields().nestedRecordArray().items().foo().value();
    PathSpec psBar5 = MixedRecord.fields().nestedRecordArray().items().bar().value();

    Optional<Object> oFoo5 = RecordUtils.getFieldValue(mixedRecord5, psFoo5);
    Optional<Object> oBar5 = RecordUtils.getFieldValue(mixedRecord5, psBar5);

    assertEquals(oFoo5.get(), new StringArray("val1", "val2"));
    assertEquals(psFoo5.toString(), "/nestedRecordArray/*/foo/value");
    assertEquals(oBar5.get(), new StringArray());
    assertEquals(psBar5.toString(), "/nestedRecordArray/*/bar/value");

    // case 6: optional field containing array of strings is not set
    final MixedRecord mixedRecord6 = new MixedRecord();
    PathSpec ps6 = MixedRecord.fields().stringArray();
    Optional<Object> o6 = RecordUtils.getFieldValue(mixedRecord6, ps6);
    assertFalse(o6.isPresent());

    // case 7: optional field containing array of records is not set
    final MixedRecord mixedRecord7 = new MixedRecord();
    PathSpec ps7 = MixedRecord.fields().recordArray().items().value();
    Optional<Object> o7 = RecordUtils.getFieldValue(mixedRecord7, ps7);
    assertFalse(o7.isPresent());
  }

  @Test(description = "Test getFieldValue() when RecordTemplate has field of type array of primitive unions")
  public void testGetFieldValueArrayOfPrimitiveUnions() {

    // case 1: array of unions of strings
    final MixedRecord mixedRecord1 =
        new MixedRecord().setUnionArray(new StringUnionArray(Arrays.asList(
            StringUnion.create("val1"),
            StringUnion.create("val2"),
            StringUnion.create("val3"),
            StringUnion.create("val4")
        )));

    PathSpec ps1 = MixedRecord.fields().unionArray();
    Object o1 = RecordUtils.getFieldValue(mixedRecord1, ps1).get();

    PathSpec ps2 = MixedRecord.fields().unionArray().items();
    Object o2 = RecordUtils.getFieldValue(mixedRecord1, ps2).get();

    assertEquals(o1, new StringUnionArray(Arrays.asList(
        StringUnion.create("val1"),
        StringUnion.create("val2"),
        StringUnion.create("val3"),
        StringUnion.create("val4")
    )));
    assertEquals(ps1.toString(), "/unionArray");

    assertEquals(o2, new StringUnionArray(Arrays.asList(
        StringUnion.create("val1"),
        StringUnion.create("val2"),
        StringUnion.create("val3"),
        StringUnion.create("val4")
    )));
    assertEquals(ps2.toString(), "/unionArray/*");
  }

  @Test
  public void testCapitalizeFirst() {
    String s = "field1";
    assertEquals(RecordUtils.capitalizeFirst(s), "Field1");

    s = "t";
    assertEquals(RecordUtils.capitalizeFirst(s), "T");

    s = "";
    assertEquals(RecordUtils.capitalizeFirst(s), "");
  }

  // Loads an AspectBaz fixture from the test classpath.
  private AspectBaz loadAspectBaz(String resourceName) throws IOException {
    return RecordUtils.toRecordTemplate(AspectBaz.class,
        IOUtils.toString(ClassLoader.getSystemResourceAsStream(resourceName), StandardCharsets.UTF_8));
  }
}
| linkedin/WhereHows | li-utils/src/test/java/com/linkedin/common/util/RecordUtilsTest.java | Java | apache-2.0 | 16,237 |
/*
Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.harmony.vts.test.vm.jvmti;
/**
* @author Valentin Al. Sitnick
* @version $Revision: 1.1 $
*
*/
public class GetErrorName0102 {

    /**
     * Entry point. The method body is intentionally empty: the JVMTI agent
     * attached to this test performs the actual GetErrorName checks natively.
     */
    public static void main(String[] args) {
    }
}
| freeVM/freeVM | enhanced/buildtest/tests/vts/vm/src/test/vm/jvmti/funcs/GetErrorName/GetErrorName0102/GetErrorName0102.java | Java | apache-2.0 | 877 |
# Grounding regression fixture: the test harness feeds `input` (a disjunctive
# logic program over a blocks-world domain) to the grounder and compares the
# result against `output`.
#
# NOTE(review): `input` shadows the Python builtin of the same name, but the
# harness locates these fixtures by module attribute name, so the names must
# not change.
input = """
supp(B) :- on(B,table,0).
supp(B) :- on(B,B1,0), supp(B1).
on(b0,table,0) :- true.
on(b1,b0,0) :- true.
on(B,L,0) | -on(B,L,0) :- block(B), location(L).
true.
location(L) :- block(L).
location(table) :- true.
block(b0).
block(b1).
block(b2).
"""

# Expected grounder output; for this case it is identical to the input program.
output = """
supp(B) :- on(B,table,0).
supp(B) :- on(B,B1,0), supp(B1).
on(b0,table,0) :- true.
on(b1,b0,0) :- true.
on(B,L,0) | -on(B,L,0) :- block(B), location(L).
true.
location(L) :- block(L).
location(table) :- true.
block(b0).
block(b1).
block(b2).
"""
| veltri/DLV2 | tests/parser/grounding.7.test.py | Python | apache-2.0 | 559 |
/*
Copyright 2011-2013 The Cassandra Consortium (cassandra-fp7.eu)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eu.cassandra.utils;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Map;
import java.util.Scanner;
import java.util.TreeMap;
import org.apache.log4j.Logger;
import org.jfree.chart.ChartFactory;
import org.jfree.chart.ChartUtilities;
import org.jfree.chart.JFreeChart;
import org.jfree.chart.plot.PlotOrientation;
import org.jfree.data.xy.XYSeries;
import org.jfree.data.xy.XYSeriesCollection;
import weka.clusterers.SimpleKMeans;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.AddCluster;
import eu.cassandra.appliance.Appliance;
import eu.cassandra.event.Event;
/**
* This class contains static functions that are used for general purposes
* throughout the Disaggregation Module.
*
* @author Antonios Chrysopoulos
* @version 0.9, Date: 29.07.2013
*/
public class Utils
{
static Logger log = Logger.getLogger(Utils.class);
/** Loading a library for integer programming. */
static {
System.loadLibrary("jniconstraintsolver");
}
/**
* This function is estimating the absolute euclidean distance of the active
* and reactive power vector distance of two points of interest in the form of
* arrays.
*
* @param a1
* The first array of values
* @param a2
* The second array of values
*
* @return the estimated absolute euclidean distance.
*/
public static double absoluteEuclideanDistance (double[] a1, double[] a2)
{
return Math.sqrt(Math.pow(a1[0] - a2[0], 2) + Math.pow(a1[1] - a2[1], 2));
}
/**
* This function is estimating the percentage euclidean distance of the active
* and reactive power vector distance of two points of interest in the form of
* arrays.
*
* @param a1
* The first array of values
*
* @param a2
* The second array of values
*
* @return the estimated percentage euclidean distance.
*/
public static double percentageEuclideanDistance (double[] a1, double[] a2)
{
return 100
* Math.sqrt(Math.pow(a1[0] - a2[0], 2) + Math.pow(a1[1] - a2[1], 2))
/ norm(a1);
}
/**
* This function is estimating the euclidean length (or norm) of an array of
* two values
*
* @param poi
* The point of interest's array of values
* @return the euclidean length of the array.
*/
public static double norm (double[] poi)
{
return Math.sqrt(Math.pow(poi[0], 2) + Math.pow(poi[1], 2));
}
/**
* This function is used in order to check if a certain appliance is within
* the permitted limits
*
* @param trueValue
* The value under examination
* @param limit
* The limit value that is used as threshold
* @return true if it is within limits, false otherwise
*/
public static boolean checkLimit (double trueValue, double limit)
{
double lowerLimit = (1 - Constants.ERROR_FRINGE) * limit;
return (trueValue > lowerLimit);
}
/**
* This function is used in order to check if a certain appliance is within
* the permitted limits
*
* @param trueValue
* The value under examination
* @param limit
* The limit value that is used as threshold
* @return true if it is within limits, false otherwise
*/
public static boolean checkLimitFridge (double trueValue, double limit)
{
double lowerLimit = 0, upperLimit = 0;
if (Constants.REF_LOOSE_COUPLING) {
lowerLimit = Constants.REF_DURATION_FRINGE - limit;
upperLimit = Constants.REF_DURATION_FRINGE + limit;
}
else {
lowerLimit = (1 - Constants.STRICT_REF_DURATION_FRINGE) * limit;
upperLimit = (1 + Constants.STRICT_REF_DURATION_FRINGE) * limit;
}
log.debug("True Value: " + trueValue + " Limit: " + limit + " UpperLimit: "
+ upperLimit + " Lower Limit: " + lowerLimit);
return (trueValue < upperLimit && trueValue > lowerLimit);
}
/**
* This function is used in order to check if a certain appliance is within
* the permitted limits
*
* @param trueValue
* The value under examination
* @param limit
* The limit value that is used as threshold
* @return true if it is within limits, false otherwise
*/
public static double pairingLimit (double trueValue)
{
double upperLimit = (1 + Constants.PAIR_ERROR_FRINGE) * trueValue;
return upperLimit;
}
/**
* This function is used for the detection of reduction points following a
* rising point, so that there is a possibility they can be connected as a
* pair
*
* @param index
* The index of the rising point of interest.
* @param pois
* The list of points of interest under examination.
* @return an array of indices that contain possible combinatorial reduction
* points of interest.
*/
public static Integer[] findRedPoints (int index,
ArrayList<PointOfInterest> pois)
{
ArrayList<Integer> temp = new ArrayList<Integer>();
double limit = pairingLimit(pois.get(index).getPDiff());
double timeLimit = Double.POSITIVE_INFINITY;
if (Constants.SIMPLE_TIME_COMPLEXITY == true)
timeLimit = pois.get(index).getMinute() + Constants.TEMPORAL_THRESHOLD;
for (int i = index + 1; i < pois.size(); i++)
if (pois.get(i).getRising() == false
&& pois.get(i).getMinute() <= timeLimit
&& limit > -pois.get(i).getPDiff())
temp.add(i);
Integer[] result = new Integer[temp.size()];
for (int i = 0; i < temp.size(); i++)
result[i] = temp.get(i);
return result;
}
/**
* This function is used for the detection of rising points preceding a
* reduction point, so that there is a possibility they can be connected as a
* pair
*
* @param index
* The index of the reduction point of interest.
* @param pois
* The list of points of interest under examination.
* @return an array of indices that contain possible combinatorial rising
* points of interest.
*/
public static Integer[] findRisPoints (int index,
ArrayList<PointOfInterest> pois)
{
ArrayList<Integer> temp = new ArrayList<Integer>();
double limit = -pairingLimit(pois.get(index).getPDiff());
double timeLimit = Double.NEGATIVE_INFINITY;
if (Constants.SIMPLE_TIME_COMPLEXITY == true)
timeLimit = pois.get(index).getMinute() - Constants.TEMPORAL_THRESHOLD;
for (int i = 0; i < index; i++)
if (pois.get(i).getRising() && pois.get(i).getMinute() >= timeLimit
&& limit > pois.get(i).getPDiff())
temp.add(i);
Integer[] result = new Integer[temp.size()];
for (int i = 0; i < temp.size(); i++)
result[i] = temp.get(i);
return result;
}
/**
* This is an auxiliary function used to estimate the mean values of a pair of
* points of interest.
*
* @param pois
* The pair of points of interest under examination.
* @return an array of the mean values of active and reactive power.
*/
public static double[] meanValues (PointOfInterest[] pois)
{
double[] result =
{ (pois[0].getPDiff() - pois[1].getPDiff()) / 2,
(pois[0].getQDiff() - pois[1].getQDiff()) / 2 };
return result;
}
/**
* This function is used for the creation of final matching pairs of points of
* interest from the solutions that the integer programming solver has
* provided.
*
* @param pois
* The list of points of interest under examination.
* @param array
* An array of 0-1 that shows which points of interest are included
* in the solution.
* @return a list of pairs of points of interest.
*/
public static ArrayList<PointOfInterest[]>
createFinalPairs (ArrayList<PointOfInterest> pois, int[] array)
{
// Initializing the auxiliary variables.
ArrayList<PointOfInterest[]> result = new ArrayList<PointOfInterest[]>();
ArrayList<PointOfInterest> rising = new ArrayList<PointOfInterest>();
ArrayList<PointOfInterest> reduction = new ArrayList<PointOfInterest>();
// For all the points if the are 1 are included in the solution
for (int i = 0; i < array.length; i++) {
if (array[i] == 1) {
if (pois.get(i).getRising())
rising.add(pois.get(i));
else
reduction.add(pois.get(i));
}
}
// If there are one of each point types.
if (rising.size() == 1 && reduction.size() == 1) {
PointOfInterest[] temp = { rising.get(0), reduction.get(0) };
result.add(temp);
}
// If there is only one rising
else if (rising.size() == 1) {
for (PointOfInterest red: reduction) {
PointOfInterest start =
new PointOfInterest(rising.get(0).getMinute(), true, -red.getPDiff(),
-red.getQDiff());
PointOfInterest[] temp = { start, red };
result.add(temp);
}
}
// If there is only one reduction
else {
for (PointOfInterest rise: rising) {
PointOfInterest end =
new PointOfInterest(reduction.get(0).getMinute(), false,
-rise.getPDiff(), -rise.getQDiff());
PointOfInterest[] temp = { rise, end };
result.add(temp);
}
}
return result;
}
/**
* This function is used to extract the file name from a path of a file,
* excluding the file extension.
*
* @param filename
* The full name and path of the file of interest.
* @return The name of the file without the file extension.
*/
public static String getFileName (String filename)
{
return filename.substring(0, filename.length() - 4);
}
/**
* This function is used in order to create clusters of points of interest
* based on the active power difference they have.
*
* @param pois
* The list of points of interest that will be clustered.
* @return The newly created clusters with the points that are comprising
* them.
* @throws Exception
*/
public static ArrayList<ArrayList<PointOfInterest>>
clusterPoints (ArrayList<PointOfInterest> pois, int bias) throws Exception
{
// Initialize the auxiliary variables
ArrayList<ArrayList<PointOfInterest>> result =
new ArrayList<ArrayList<PointOfInterest>>();
// Estimating the number of clusters that will be created
int numberOfClusters =
(int) (Math.ceil((double) pois.size()
/ (double) Constants.MAX_POINTS_OF_INTEREST))
+ bias;
log.info("Clusters: " + pois.size() + " / "
+ Constants.MAX_POINTS_OF_INTEREST + " + " + bias + " = "
+ numberOfClusters);
// Create a new empty list of points for each cluster
for (int i = 0; i < numberOfClusters; i++)
result.add(new ArrayList<PointOfInterest>());
// Initializing auxiliary variables namely the attributes of the data set
Attribute id = new Attribute("id");
Attribute pDiffRise = new Attribute("pDiff");
ArrayList<Attribute> attr = new ArrayList<Attribute>();
attr.add(id);
attr.add(pDiffRise);
Instances instances = new Instances("Points of Interest", attr, 0);
// Each event is translated to an instance with the above attributes
for (int i = 0; i < pois.size(); i++) {
Instance inst = new DenseInstance(2);
inst.setValue(id, i);
inst.setValue(pDiffRise, Math.abs(pois.get(i).getPDiff()));
instances.add(inst);
}
// System.out.println(instances.toString());
Instances newInst = null;
log.debug("Instances: " + instances.toSummaryString());
// Create the addcluster filter of Weka and the set up the hierarchical
// clusterer.
AddCluster addcluster = new AddCluster();
SimpleKMeans kmeans = new SimpleKMeans();
kmeans.setSeed(numberOfClusters);
// This is the important parameter to set
kmeans.setPreserveInstancesOrder(true);
kmeans.setNumClusters(numberOfClusters);
kmeans.buildClusterer(instances);
addcluster.setClusterer(kmeans);
addcluster.setInputFormat(instances);
addcluster.setIgnoredAttributeIndices("1");
// Cluster data set
newInst = Filter.useFilter(instances, addcluster);
// System.out.println(newInst.toString());
// Parse through the dataset to see where each point is placed in the
// clusters.
for (int i = 0; i < newInst.size(); i++) {
String cluster = newInst.get(i).stringValue(newInst.attribute(2));
cluster = cluster.replace("cluster", "");
log.debug("Point of Interest: " + i + " Cluster: " + cluster);
result.get(Integer.parseInt(cluster) - 1).add(pois.get(i));
}
// Sorting the each cluster points by their minutes.
for (int i = result.size() - 1; i >= 0; i--) {
if (result.get(i).size() == 0)
result.remove(i);
else
Collections.sort(result.get(i), Constants.comp);
}
// Sorting the all clusters by their active power.
Collections.sort(result, Constants.comp5);
return result;
}
/**
* This function is utilized for the extraction of the points that are not
* combined with other ones in order to create the final pairs of operation.
*
* @param pois
* The list of all the points of interest.
* @param solution
* This is the list that contains the solution vectors for the points
* of interest.
* @param solutionArray
* This array contains the indices of the points of interest
* participating in each solution.
* @return
*/
public static ArrayList<PointOfInterest>
extractRemainingPoints (ArrayList<PointOfInterest> pois,
ArrayList<Integer> solution, int[][] solutionArray)
{
ArrayList<PointOfInterest> result = new ArrayList<PointOfInterest>();
int[] tempArray = new int[solutionArray[0].length];
for (Integer index: solution)
for (int i = 0; i < solutionArray[index].length; i++)
if (solutionArray[index][i] == 1)
tempArray[i] = 1;
// System.out.println("TempArray:" + Arrays.toString(tempArray));
for (int i = 0; i < tempArray.length; i++)
if (tempArray[i] == 0)
result.add(pois.get(i));
if (result.size() == 0)
result = null;
return result;
}
/**
* This function is used to remove the smallest points of interest from a list
* in order to make its size viable to estimate the pairs.
*
* @param pois
* The list of points of interest.
* @return The list of points of interest with a percentage of the points
* removed.
*/
public static ArrayList<PointOfInterest>
removePoints (ArrayList<PointOfInterest> pois)
{
ArrayList<PointOfInterest> result = new ArrayList<PointOfInterest>();
int number = pois.size() - Constants.REMOVAL_MAX_POINTS;
log.debug("Initial Size: " + pois.size() + " Removing: " + number);
Collections.sort(pois, Constants.comp4);
log.debug("Initial POIS: " + pois.toString());
Collections.sort(result, Constants.comp4);
for (int i = 0; i < number; i++)
result.add(pois.remove(pois.size() - 1));
log.debug("Removed POIS: " + result.toString());
return result;
}
/**
* This function is used in order to find the maximum value from an array.
*
* @param matrix
* @return
*/
public static double findMax (double[] matrix)
{
double result = Double.NEGATIVE_INFINITY;
for (int i = 0; i < matrix.length; i++)
if (result < matrix[i])
result = matrix[i];
return result;
}
/**
* This function is used in order to find the maximum value from an array.
*
* @param matrix
* @return
*/
public static double findMax (ArrayList<Double> matrix)
{
double result = Double.NEGATIVE_INFINITY;
for (int i = 0; i < matrix.size(); i++)
if (result < matrix.get(i))
result = matrix.get(i);
return result;
}
/**
* This function is used when the user has already tracked the electrical
* appliances installed in the installation. He can used them as a base case
* and extend it with any additional ones that may be found during the later
* stages of analysis of the consumption.
*
* @param filename
* The filename of the file containing the appliances.
* @return
* A list of appliances
* @throws FileNotFoundException
*/
public static ArrayList<Appliance> appliancesFromFile (String filename)
throws FileNotFoundException
{
// Read appliance file and start appliance parsing
File file = new File(filename);
Scanner input = new Scanner(file);
ArrayList<Appliance> appliances = new ArrayList<Appliance>();
String nextLine;
String[] line;
while (input.hasNextLine()) {
nextLine = input.nextLine();
line = nextLine.split(",");
String name = line[0];
String activity = line[1];
if (activity.contains("Standby") == false
&& activity.contains("Refrigeration") == false) {
double p = Double.parseDouble(line[2]);
double q = Double.parseDouble(line[3]);
// For each appliance found in the file, an temporary Appliance
// Entity is created.
appliances.add(new Appliance(name, activity, p, q, 0, 100));
}
}
System.out.println("Appliances:" + appliances.size());
input.close();
return appliances;
}
/**
* This is an auxiliary function used to check if the distance in time and
* space of a pair is close to this appliance, meaning that it belongs to this
* appliance.
*
* @param mean
* The mean active and reactive power measurements.
* @param duration
* The duration of the end-use.
* @param metrics
* The metrics that are the base level
* @return true if it is close, false otherwise.
*/
public static boolean isCloseRef (double[] mean, int duration,
double[] metrics)
{
double[] meanValues = { metrics[0], metrics[1] };
return ((Utils.percentageEuclideanDistance(mean, meanValues) < Constants.REF_THRESHOLD) && (Utils
.checkLimitFridge(duration, metrics[2])));
}
/**
* This function is called when the temporary files must be removed from the
* temporary folder used to store the csv and xls used to create the entity
* models during the procedure of training and disaggregation. It is done when
* the program starts, when the program ends and when the reset button is
* pressed by the user.
*/
public static void cleanFiles ()
{
File directory = new File("TempFiles");
File files[] = directory.listFiles();
String extension = "";
for (int index = 0; index < files.length; index++) {
{
extension =
files[index].getAbsolutePath().substring(files[index]
.getAbsolutePath()
.length() - 3,
files[index]
.getAbsolutePath()
.length());
if (extension.equalsIgnoreCase("csv")) {
boolean wasDeleted = files[index].delete();
if (!wasDeleted) {
System.out.println("Not Deleted File " + files[index].toString());
}
}
}
}
}
public static double estimateThreshold (double[] power, boolean median)
{
double result = 0;
ArrayList<Double> minimums = new ArrayList<Double>();
double min = Double.POSITIVE_INFINITY;
for (int i = 0; i < power.length; i++) {
if (min > power[i])
min = power[i];
if (i % 1440 == 0 && i != 0) {
minimums.add(min);
min = Double.POSITIVE_INFINITY;
}
}
if (minimums.size() == 0)
minimums.add(min);
log.debug("================THRESHOLD SETTING================");
log.debug("Minimums: " + minimums.toString());
log.debug("Median:" + median);
if (median)
result = Utils.estimateMedian(minimums);
else
result = Utils.estimateMean(minimums);
log.debug("Resulting threshold: " + result);
log.debug("");
log.debug("");
return result;
}
public static double estimateMedian (ArrayList<Double> values)
{
double result = 0.0;
int index = -1;
Collections.sort(values);
log.info("Values: " + values);
if (values.size() == 2)
index = 0;
else
index = values.size() / 2;
if (values.size() % 2 == 0)
result = (values.get(index) + values.get(index + 1)) / 2;
else
result = values.get(index);
log.info("Result:" + result);
return result;
}
public static double estimateMean (ArrayList<Double> values)
{
double result = 0.0;
double sum = 0.0;
for (double minimum: values)
sum += minimum;
result = sum / values.size();
return result;
}
public static double estimateStd (ArrayList<Double> values, double mean)
{
double result = 0.0;
double sum = 0;
for (double value: values)
sum += Math.pow((value - mean), 2);
sum /= values.size();
result = Math.sqrt(sum);
return result;
}
/**
* This is an auxiliary function used for checking if all the points of
* interest are of the same type.
*
* @param pois
* A list of points of interest
* @return true if they are all of the same type, false otherwise.
*/
public static boolean allSamePoints (ArrayList<PointOfInterest> pois)
{
// Initializing the auxiliary variables
boolean flag = true;
boolean start = pois.get(0).getRising();
for (PointOfInterest poi: pois)
if (start != poi.getRising()) {
flag = false;
break;
}
return flag;
}
  // NOTE(review): returns an all-zero array sized like the event's reactive
  // power trace — this looks like an unfinished stub; confirm whether an
  // actual normalization was intended here.
  public static double[] normalizeReactive (Event event)
  {
    double[] result = new double[event.getReactivePowerConsumptions().length];
    return result;
  }
/**
* This function is used for the visualization of a Line Diagram.
*
* @param title
* The title of the chart.
* @param x
* The unit on the X axis of the chart.
* @param y
* The unit on the Y axis of the chart.
* @param data
* The array of values.
* @return a chart panel with the graphical representation.
*/
public static void createLineDiagram (String title, String x, String y,
ArrayList<Double> data)
{
XYSeries series1 = new XYSeries("Active Power");
for (int i = 0; i < data.size(); i++) {
series1.add(i, data.get(i));
}
XYSeriesCollection dataset = new XYSeriesCollection();
dataset.addSeries(series1);
PlotOrientation orientation = PlotOrientation.VERTICAL;
boolean show = true;
boolean toolTips = false;
boolean urls = false;
JFreeChart chart =
ChartFactory.createXYLineChart(title, x, y, dataset, orientation, show,
toolTips, urls);
int width = 1024;
int height = 768;
try {
ChartUtilities.saveChartAsPNG(new File(Constants.chartFolder + title
+ ".PNG"), chart, width, height);
}
catch (IOException e) {
}
}
public static double countPoints (int[] points)
{
int counter = 0;
for (int i = 0; i < points.length; i++)
if (points[i] == 1)
counter++;
return counter;
}
public static void durationCheck (ArrayList<Event> events)
{
log.info("====================DURATIONS========================");
ArrayList<Integer> durations = new ArrayList<Integer>();
int start = -1, end = -1, counter = 0;
int duration = -1;
for (Event event: events) {
start = event.getStartMinute();
end = event.getEndMinute();
duration = end - start;
if (duration > Constants.MINUTES_PER_DAY) {
counter++;
log.info("Start:" + +start + " End: " + end + " Duration:" + duration);
}
durations.add(duration);
}
Collections.sort(durations);
log.info("Durations:" + durations.toString());
log.info("Events over a day: " + counter);
}
public static void
removePoints (ArrayList<PointOfInterest> points, int minute)
{
int i = 0;
for (i = 0; i < points.size(); i++)
if (points.get(i).getMinute() == minute)
break;
points.remove(i);
}
public static Map<Double, Double>
estimateCumulativeValues (ArrayList<Double> dataset)
{
log.info("============ESTIMATE CUMULATIVE VALUES==================");
Map<Double, Double> result = new TreeMap<Double, Double>();
double mean = estimateMean(dataset);
double std = estimateStd(dataset, mean);
log.info("Mean: " + mean);
log.info("Standard Deviation: " + std);
for (Double value: dataset)
if (result.containsKey(value) == false)
result.put(value, 1 - Gaussian.bigPhi(value, mean, std));
// System.out.println(result.toString());
return result;
}
}
| cassandra-project/disaggregation | src/eu/cassandra/utils/Utils.java | Java | apache-2.0 | 26,163 |
// Copyright 2006 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview tmpnetwork.js contains some temporary networking functions
* for browserchannel which will be moved at a later date.
*/
/**
* Namespace for BrowserChannel
*/
goog.provide('goog.net.tmpnetwork');
goog.require('goog.Uri');
goog.require('goog.net.ChannelDebug');
/**
* Default timeout to allow for google.com pings.
* @type {number}
*/
goog.net.tmpnetwork.GOOGLECOM_TIMEOUT = 10000;
/**
 * Pings a known-good image on www.google.com to test basic network
 * connectivity.
 * @param {Function} callback Called with true if the image loaded within the
 *     timeout, false otherwise.
 * @param {goog.Uri=} opt_imageUri Optional image URI to probe instead of the
 *     default google.com beacon.
 */
goog.net.tmpnetwork.testGoogleCom = function(callback, opt_imageUri) {
  var probeUri = opt_imageUri;
  if (!probeUri) {
    // Cache-bust so the response cannot be satisfied by the browser cache.
    probeUri = new goog.Uri('//www.google.com/images/cleardot.gif');
    probeUri.makeUnique();
  }
  goog.net.tmpnetwork.testLoadImage(
      probeUri.toString(), goog.net.tmpnetwork.GOOGLECOM_TIMEOUT, callback);
};
/**
 * Test loading the given image, retrying if necessary.
 * @param {string} url URL to the image.
 * @param {number} timeout Milliseconds before giving up.
 * @param {Function} callback Function to call with results.
 * @param {number} retries The number of times to retry.
 * @param {number=} opt_pauseBetweenRetriesMS Optional number of milliseconds
 *     between retries - defaults to 0.
 */
goog.net.tmpnetwork.testLoadImageWithRetries = function(url, timeout, callback,
    retries, opt_pauseBetweenRetriesMS) {
  var channelDebug = new goog.net.ChannelDebug();
  channelDebug.debug('TestLoadImageWithRetries: ' + opt_pauseBetweenRetriesMS);
  if (retries == 0) {
    // Retry budget exhausted: report failure.
    callback(false);
    return;
  }
  var pauseMs = opt_pauseBetweenRetriesMS || 0;
  retries--;
  goog.net.tmpnetwork.testLoadImage(url, timeout, function(succeeded) {
    if (succeeded) {
      callback(true);
      return;
    }
    // Schedule the next attempt after the configured pause.
    goog.global.setTimeout(function() {
      goog.net.tmpnetwork.testLoadImageWithRetries(
          url, timeout, callback, retries, pauseMs);
    }, pauseMs);
  });
};
/**
 * Test loading the given image.
 * @param {string} url URL to the image.
 * @param {number} timeout Milliseconds before giving up.
 * @param {Function} callback Function to call with results.
 */
goog.net.tmpnetwork.testLoadImage = function(url, timeout, callback) {
  var channelDebug = new goog.net.ChannelDebug();
  channelDebug.debug('TestLoadImage: loading ' + url);
  var img = new Image();
  var timer = null;
  // Declared with var: the original assignment leaked createHandler into the
  // global scope (and would throw a ReferenceError in strict mode).
  var createHandler = function(result, message) {
    return function() {
      try {
        channelDebug.debug('TestLoadImage: ' + message);
        goog.net.tmpnetwork.clearImageCallbacks_(img);
        goog.global.clearTimeout(timer);
        callback(result);
      } catch (e) {
        channelDebug.dumpException(e);
      }
    };
  };
  img.onload = createHandler(true, 'loaded');
  img.onerror = createHandler(false, 'error');
  img.onabort = createHandler(false, 'abort');
  img.ontimeout = createHandler(false, 'timeout');
  timer = goog.global.setTimeout(function() {
    if (img.ontimeout) {
      img.ontimeout();
    }
  }, timeout);
  img.src = url;
};
/**
 * Clear handlers to avoid memory leaks.
 * @param {Image} img The image to clear handlers from.
 * @private
 */
goog.net.tmpnetwork.clearImageCallbacks_ = function(img) {
  // NOTE(user): Nullified individually (rather than in a loop) to avoid
  // compiler warnings (BUG 658126); keep this shape.
  img.onload = null;
  img.onerror = null;
  img.onabort = null;
  img.ontimeout = null;
};
| Digaku/closure-library | closure/goog/net/tmpnetwork.js | JavaScript | apache-2.0 | 4,070 |
/**
* Created by chensheng on 15/8/3.
*/
'use strict';
(function (ns) {
  // Admin page view for editing a publisher. Extends the generic loader view
  // with cascading province/city selects and publisher-type switching.
  ns.Publisher = tp.view.Loader.extend({
    events: {
      'change [name=publisher_type]': 'publisherType_changeHandler',
      'change [name=province]': 'province_changeHandler'
    },
    initialize: function (options) {
      tp.view.Loader.prototype.initialize.call(this, options);
      // Template for the <option> list of the city select.
      this.optionTemplate = Handlebars.compile('{{#each cities}}<option value="{{.}}">{{.}}</option>{{/each}}');
    },
    render: function () {
      tp.view.Loader.prototype.render.call(this);
      var province = this.model.get('province');
      this.$('[name=province]').val(province);
      // renderCities expects the province's index into
      // model.options.provinces, not its name.
      this.renderCities(this.model.options.provinces.indexOf(province));
    },
    renderCities: function (province) {
      // `province` is an index into the parallel cities array — TODO confirm
      // and consider renaming to provinceIndex.
      var cities = this.model.options.cities;
      this.$('[name=city]').html(this.optionTemplate({cities: cities[province]}));
    },
    province_changeHandler: function (event) {
      // selectedIndex lines up with the order of the provinces array.
      var province = event.target.selectedIndex;
      this.renderCities(province);
    },
    publisherType_changeHandler: function (event) {
      // Show only the form section (personal or corp) matching the chosen
      // publisher type; hide the other.
      var className = $(event.currentTarget).data('class');
      this.$('.personal, .corp').not('.' + className).addClass('hide');
      this.$('.' + className).removeClass('hide');
    }
  });
}(Nervenet.createNameSpace('admin.page')));
| RyanTech/admin-v5 | js/page/Publisher.js | JavaScript | apache-2.0 | 1,341 |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"context"
"fmt"
"strconv"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
)
const (
// how long to wait for a resource quota update to occur
resourceQuotaTimeout = 30 * time.Second
podName = "pfpod"
)
var classGold = "gold"
var extendedResourceName = "example.com/dongle"
var _ = SIGDescribe("ResourceQuota", func() {
f := framework.NewDefaultFramework("resourcequota")
/*
Release: v1.16
Testname: ResourceQuota, object count quota, resourcequotas
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
*/
framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, object count quota, service
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
Create a Service. Its creation MUST be successful and resource usage count against the Service object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the Service. Deletion MUST succeed and resource usage count against the Service object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func() {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a Service")
service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP)
service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures service creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
usedResources[v1.ResourceServices] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting a Service")
err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
usedResources[v1.ResourceServices] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
framework.ExpectNoError(err)
})
/*
   Release: v1.16
   Testname: ResourceQuota, object count quota, secret
   Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
   Create a Secret. Its creation MUST be successful and resource usage count against the Secret object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
   Delete the Secret. Deletion MUST succeed and resource usage count against the Secret object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a secret.", func() {
	ginkgo.By("Discovering how many secrets are in namespace by default")
	found, unchanged := 0, 0
	// On contended servers the service account controller can slow down, leading to the count changing during a run.
	// Poll once per second (30s budget) until the secret count has been identical for 5 consecutive polls.
	// The Poll return value (a possible timeout error) is deliberately ignored: a best-effort stable
	// baseline is sufficient here, and the quota wait below will catch a genuinely wrong count.
	wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
		secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		if len(secrets.Items) == found {
			// Count unchanged since the last poll; declare stable after 5 matching observations.
			unchanged++
			return unchanged > 4, nil
		}
		unchanged = 0
		found = len(secrets.Items)
		return false, nil
	})
	// Baseline number of pre-existing secrets, and a hard limit that leaves room for exactly one more.
	defaultSecrets := fmt.Sprintf("%d", found)
	hardSecrets := fmt.Sprintf("%d", found+1)
	ginkgo.By("Counting existing ResourceQuota")
	c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	resourceQuota := newTestResourceQuota(quotaName)
	resourceQuota.Spec.Hard[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status is calculated")
	usedResources := v1.ResourceList{}
	usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
	usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a Secret")
	secret := newTestSecretForQuota("test-secret")
	secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status captures secret creation")
	usedResources = v1.ResourceList{}
	// Usage should now be at the hard limit: the pre-existing (baseline) secrets plus the one just created.
	usedResources[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Deleting a secret")
	err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status released usage")
	// After deletion, usage must drop back to the baseline count.
	usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
})
/*
   Release: v1.16
   Testname: ResourceQuota, object count quota, pod
   Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
   Create a Pod with resource request count for CPU, Memory, EphemeralStorage and ExtendedResourceName. Pod creation MUST be successful and respective resource usage count MUST be captured in ResourceQuotaStatus of the ResourceQuota.
   Create another Pod with resource request exceeding remaining quota. Pod creation MUST fail as the request exceeds ResourceQuota limits.
   Update the successfully created pod's resource requests. Update MUST fail as a Pod can not dynamically update its resource requirements.
   Delete the successfully created Pod. Pod Deletion MUST be successful and it MUST release the allocated resource counts from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func() {
	ginkgo.By("Counting existing ResourceQuota")
	c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	resourceQuota := newTestResourceQuota(quotaName)
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status is calculated")
	usedResources := v1.ResourceList{}
	usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a Pod that fits quota")
	podName := "test-pod"
	requests := v1.ResourceList{}
	limits := v1.ResourceList{}
	// Requests sized to fit within the quota while leaving too little headroom
	// for the "fail" pods created later in this test.
	requests[v1.ResourceCPU] = resource.MustParse("500m")
	requests[v1.ResourceMemory] = resource.MustParse("252Mi")
	requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
	// Extended resources are specified with limits equal to requests.
	requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
	limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
	pod := newTestPodForQuota(f, podName, requests, limits)
	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	// Keep a handle on the created pod so we can attempt an (invalid) in-place resource update below.
	podToUpdate := pod
	ginkgo.By("Ensuring ResourceQuota status captures the pod usage")
	usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
	usedResources[v1.ResourcePods] = resource.MustParse("1")
	usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
	usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
	usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
	// Extended resource usage is tracked in quota status under the requests prefix
	// (v1.DefaultResourceRequestsPrefix).
	usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = requests[v1.ResourceName(extendedResourceName)]
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Not allowing a pod to be created that exceeds remaining quota")
	// This request is expected to exceed the CPU headroom remaining after the first pod,
	// so the API server must reject the create.
	requests = v1.ResourceList{}
	requests[v1.ResourceCPU] = resource.MustParse("600m")
	requests[v1.ResourceMemory] = resource.MustParse("100Mi")
	pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
	_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectError(err)
	ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)")
	// Same idea, but the rejection must be triggered by the extended-resource request.
	requests = v1.ResourceList{}
	limits = v1.ResourceList{}
	requests[v1.ResourceCPU] = resource.MustParse("500m")
	requests[v1.ResourceMemory] = resource.MustParse("100Mi")
	requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
	requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
	limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
	pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
	_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectError(err)
	ginkgo.By("Ensuring a pod cannot update its resource requirements")
	// A pod cannot dynamically update its resource requirements; the Update call must fail.
	requests = v1.ResourceList{}
	requests[v1.ResourceCPU] = resource.MustParse("100m")
	requests[v1.ResourceMemory] = resource.MustParse("100Mi")
	requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
	podToUpdate.Spec.Containers[0].Resources.Requests = requests
	_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), podToUpdate, metav1.UpdateOptions{})
	framework.ExpectError(err)
	ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Deleting the pod")
	// Grace period 0 forces immediate deletion so the quota release can be observed promptly.
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status released the pod usage")
	usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
	usedResources[v1.ResourcePods] = resource.MustParse("0")
	usedResources[v1.ResourceCPU] = resource.MustParse("0")
	usedResources[v1.ResourceMemory] = resource.MustParse("0")
	usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
	usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
})
/*
   Release: v1.16
   Testname: ResourceQuota, object count quota, configmap
   Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
   Create a ConfigMap. Its creation MUST be successful and resource usage count against the ConfigMap object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
   Delete the ConfigMap. Deletion MUST succeed and resource usage count against the ConfigMap object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a configMap.", func() {
	found, unchanged := 0, 0
	// On contended servers the service account controller can slow down, leading to the count changing during a run.
	// Poll once per second (60s budget) until the configmap count has been identical for 16 consecutive polls.
	// The Poll return value (a possible timeout error) is deliberately ignored: a best-effort stable
	// baseline is sufficient, and the quota wait below will catch a genuinely wrong count.
	wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
		configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		if len(configmaps.Items) == found {
			// Count unchanged since the last poll; declare stable after 16 matching observations.
			unchanged++
			return unchanged > 15, nil
		}
		unchanged = 0
		found = len(configmaps.Items)
		return false, nil
	})
	// Baseline number of pre-existing configmaps (each namespace receives a root CA configmap
	// by default; ref: https://github.com/kubernetes/kubernetes/pull/68812) and a hard limit
	// that leaves room for exactly one more.
	defaultConfigMaps := fmt.Sprintf("%d", found)
	hardConfigMaps := fmt.Sprintf("%d", found+1)
	ginkgo.By("Counting existing ResourceQuota")
	c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	resourceQuota := newTestResourceQuota(quotaName)
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status is calculated")
	usedResources := v1.ResourceList{}
	usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
	usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a ConfigMap")
	configMap := newTestConfigMapForQuota("test-configmap")
	configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status captures configMap creation")
	usedResources = v1.ResourceList{}
	usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
	// Usage should now be the pre-existing (baseline) configmaps plus the one just created.
	usedResources[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps)
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Deleting a ConfigMap")
	err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status released usage")
	// After deletion, usage must drop back to the baseline count.
	usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
})
/*
   Release: v1.16
   Testname: ResourceQuota, object count quota, replicationController
   Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
   Create a ReplicationController. Its creation MUST be successful and resource usage count against the ReplicationController object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
   Delete the ReplicationController. Deletion MUST succeed and resource usage count against the ReplicationController object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func() {
	ginkgo.By("Counting existing ResourceQuota")
	existingQuotas, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuota(quotaName))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status is calculated")
	usage := v1.ResourceList{
		v1.ResourceQuotas:                 resource.MustParse(strconv.Itoa(existingQuotas + 1)),
		v1.ResourceReplicationControllers: resource.MustParse("0"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ReplicationController")
	rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newTestReplicationControllerForQuota("test-rc", "nginx", 0), metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status captures replication controller creation")
	usage = v1.ResourceList{
		v1.ResourceReplicationControllers: resource.MustParse("1"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting a ReplicationController")
	// Without an explicit propagation policy, the object isn't actually removed
	// until the GC verifies that all children have been detached:
	// ReplicationControllers default to "orphan", which is different from most
	// resources. (Why? To preserve a common workflow from prior to the GC's
	// introduction.) Request background propagation so deletion proceeds.
	propagation := metav1.DeletePropagationBackground
	err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(context.TODO(), rc.Name, metav1.DeleteOptions{
		PropagationPolicy: &propagation,
	})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released usage")
	usage[v1.ResourceReplicationControllers] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)
})
/*
   Release: v1.16
   Testname: ResourceQuota, object count quota, replicaSet
   Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
   Create a ReplicaSet. Its creation MUST be successful and resource usage count against the ReplicaSet object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
   Delete the ReplicaSet. Deletion MUST succeed and resource usage count against the ReplicaSet object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func() {
	// Object-count quota resource name for ReplicaSets.
	rsCountResource := v1.ResourceName("count/replicasets.apps")

	ginkgo.By("Counting existing ResourceQuota")
	existingQuotas, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuota(quotaName))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status is calculated")
	usage := v1.ResourceList{
		v1.ResourceQuotas: resource.MustParse(strconv.Itoa(existingQuotas + 1)),
		rsCountResource:   resource.MustParse("0"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ReplicaSet")
	rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newTestReplicaSetForQuota("test-rs", "nginx", 0), metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status captures replicaset creation")
	usage = v1.ResourceList{
		rsCountResource: resource.MustParse("1"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting a ReplicaSet")
	err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(context.TODO(), rs.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released usage")
	usage[rsCountResource] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)
})
/*
   Release: v1.16
   Testname: ResourceQuota, object count quota, pvc
   Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
   Create PersistentVolumeClaim (PVC) to request storage capacity of 1G. PVC creation MUST be successful and resource usage count against the PVC and storage object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
   Delete the PVC. Deletion MUST succeed and resource usage count against its PVC and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
   [NotConformancePromotable] as test suite do not have any e2e at this moment which are explicitly verifying PV and PVC behaviour.
*/
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim. [sig-storage]", func() {
	ginkgo.By("Counting existing ResourceQuota")
	existingQuotas, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuota(quotaName))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status is calculated")
	usage := v1.ResourceList{
		v1.ResourceQuotas:                 resource.MustParse(strconv.Itoa(existingQuotas + 1)),
		v1.ResourcePersistentVolumeClaims: resource.MustParse("0"),
		v1.ResourceRequestsStorage:        resource.MustParse("0"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a PersistentVolumeClaim")
	pvc, err := f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), newTestPersistentVolumeClaimForQuota("test-claim"), metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
	usage = v1.ResourceList{
		v1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
		v1.ResourceRequestsStorage:        resource.MustParse("1Gi"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting a PersistentVolumeClaim")
	err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released usage")
	usage[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
	usage[v1.ResourceRequestsStorage] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)
})
/*
   Release: v1.16
   Testname: ResourceQuota, object count quota, storageClass
   Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
   Create PersistentVolumeClaim (PVC) with specified storageClass to request storage capacity of 1G. PVC creation MUST be successful and resource usage count against PVC, storageClass and storage object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
   Delete the PVC. Deletion MUST succeed and resource usage count against PVC, storageClass and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
   [NotConformancePromotable] as test suite do not have any e2e at this moment which are explicitly verifying PV and PVC behaviour.
*/
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class. [sig-storage]", func() {
	// Per-storage-class quota resource names for the "gold" class.
	goldPVCs := core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)
	goldStorage := core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)

	ginkgo.By("Counting existing ResourceQuota")
	existingQuotas, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuota(quotaName))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status is calculated")
	usage := v1.ResourceList{
		v1.ResourceQuotas:                 resource.MustParse(strconv.Itoa(existingQuotas + 1)),
		v1.ResourcePersistentVolumeClaims: resource.MustParse("0"),
		v1.ResourceRequestsStorage:        resource.MustParse("0"),
		goldPVCs:                          resource.MustParse("0"),
		goldStorage:                       resource.MustParse("0"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a PersistentVolumeClaim with storage class")
	pvc := newTestPersistentVolumeClaimForQuota("test-claim")
	pvc.Spec.StorageClassName = &classGold
	pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
	usage = v1.ResourceList{
		v1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
		v1.ResourceRequestsStorage:        resource.MustParse("1Gi"),
		goldPVCs:                          resource.MustParse("1"),
		goldStorage:                       resource.MustParse("1Gi"),
	}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting a PersistentVolumeClaim")
	err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released usage")
	usage[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
	usage[v1.ResourceRequestsStorage] = resource.MustParse("0")
	usage[goldPVCs] = resource.MustParse("0")
	usage[goldStorage] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usage)
	framework.ExpectNoError(err)
})
ginkgo.It("should create a ResourceQuota and capture the life of a custom resource.", func() {
	ginkgo.By("Creating a Custom Resource Definition")
	testcrd, err := crd.CreateTestCRD(f)
	framework.ExpectNoError(err)
	defer testcrd.CleanUp()
	// Object-count quota resource name for the custom resource, e.g. "count/<plural>.<group>".
	countResourceName := "count/" + testcrd.Crd.Spec.Names.Plural + "." + testcrd.Crd.Spec.Group
	// The resourcequota controller needs up to 30 seconds to discover the new custom resource.
	// To make sure the controller knows about this resource before the real assertions start,
	// create a throwaway resourcequota for it and trigger updates until usage appears in its status.
	quotaName := "quota-for-" + testcrd.Crd.Spec.Names.Plural
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: quotaName},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourceName(countResourceName): resource.MustParse("0"),
			},
		},
	})
	framework.ExpectNoError(err)
	err = updateResourceQuotaUntilUsageAppears(f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName))
	framework.ExpectNoError(err)
	// Remove the throwaway quota before counting/creating the real one.
	err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(context.TODO(), quotaName, metav1.DeleteOptions{})
	framework.ExpectNoError(err)
	ginkgo.By("Counting existing ResourceQuota")
	c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a ResourceQuota")
	quotaName = "test-quota"
	resourceQuota := newTestResourceQuota(quotaName)
	// Allow exactly one instance of the custom resource.
	resourceQuota.Spec.Hard[v1.ResourceName(countResourceName)] = resource.MustParse("1")
	_, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status is calculated")
	usedResources := v1.ResourceList{}
	usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
	usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a custom resource")
	resourceClient := testcrd.DynamicClients["v1"]
	testcr, err := instantiateCustomResource(&unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name,
			"kind":       testcrd.Crd.Spec.Names.Kind,
			"metadata": map[string]interface{}{
				"name": "test-cr-1",
			},
		},
	}, resourceClient, testcrd.Crd)
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status captures custom resource creation")
	usedResources = v1.ResourceList{}
	usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("1")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
	ginkgo.By("Creating a second custom resource")
	_, err = instantiateCustomResource(&unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name,
			"kind":       testcrd.Crd.Spec.Names.Kind,
			"metadata": map[string]interface{}{
				"name": "test-cr-2",
			},
		},
	}, resourceClient, testcrd.Crd)
	// Since the quota allows only one instance, this creation must fail.
	framework.ExpectError(err)
	ginkgo.By("Deleting a custom resource")
	err = deleteCustomResource(resourceClient, testcr.GetName())
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring resource quota status released usage")
	usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
	framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, quota scope, Terminating and NotTerminating scope
Description: Create two ResourceQuotas, one with 'Terminating' scope and another with 'NotTerminating' scope. Request and limit counts for CPU and Memory resources are set for the ResourceQuotas. Creation MUST be successful and their ResourceQuotaStatus MUST match the expected used and total allowed resource quota count within namespace.
Create a Pod with specified CPU and Memory ResourceRequirements fall within quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'NotTerminating' scoped ResourceQuota but MUST NOT in 'Terminating' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'NotTerminating' scoped ResourceQuota.
Create a pod with specified activeDeadlineSeconds and resourceRequirements for CPU and Memory fall within quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'Terminating' scoped ResourceQuota but MUST NOT in 'NotTerminating' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'Terminating' scoped ResourceQuota.
*/
framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func() {
	// Resource requirements shared by both pods created below; they fall
	// within the hard limits of the test quotas so creation should succeed.
	requests := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("200Mi"),
	}
	limits := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("1"),
		v1.ResourceMemory: resource.MustParse("400Mi"),
	}

	// The usage counters asserted against each quota's status below.
	tracked := []v1.ResourceName{
		v1.ResourcePods,
		v1.ResourceRequestsCPU,
		v1.ResourceRequestsMemory,
		v1.ResourceLimitsCPU,
		v1.ResourceLimitsMemory,
	}
	usedResources := v1.ResourceList{v1.ResourcePods: resource.MustParse("0")}
	// expectIdle resets every tracked counter to zero — the state expected
	// of a quota that is not charged for the pod.
	expectIdle := func() {
		for _, name := range tracked {
			usedResources[name] = resource.MustParse("0")
		}
	}
	// expectOnePod sets the counters to the usage of a single running pod
	// with the requests/limits above.
	expectOnePod := func() {
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
		usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
		usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
		usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
	}

	ginkgo.By("Creating a ResourceQuota with terminating scope")
	quotaTerminatingName := "quota-terminating"
	terminatingQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring ResourceQuota status is calculated")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, terminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ResourceQuota with not terminating scope")
	quotaNotTerminatingName := "quota-not-terminating"
	notTerminatingQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring ResourceQuota status is calculated")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notTerminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a long running pod")
	podName := "test-pod"
	longRunning := newTestPodForQuota(f, podName, requests, limits)
	_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), longRunning, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
	expectOnePod()
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notTerminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage")
	expectIdle()
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, terminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting the pod")
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released the pod usage")
	expectIdle()
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notTerminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a terminating pod")
	podName = "terminating-pod"
	terminating := newTestPodForQuota(f, podName, requests, limits)
	// Setting activeDeadlineSeconds is what places this pod in the
	// Terminating quota scope.
	activeDeadlineSeconds := int64(3600)
	terminating.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
	_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), terminating, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
	expectOnePod()
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, terminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage")
	expectIdle()
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notTerminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting the pod")
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released the pod usage")
	expectIdle()
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, terminatingQuota.Name, usedResources)
	framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, quota scope, BestEffort and NotBestEffort scope
Description: Create two ResourceQuotas, one with 'BestEffort' scope and another with 'NotBestEffort' scope. Creation MUST be successful and their ResourceQuotaStatus MUST match the expected used and total allowed resource quota count within namespace.
Create a 'BestEffort' Pod by not explicitly specifying resource limits and requests. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'BestEffort' scoped ResourceQuota but MUST NOT in 'NotBestEffort' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'BestEffort' scoped ResourceQuota.
Create a 'NotBestEffort' Pod by explicitly specifying resource limits and requests. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota but MUST NOT in 'BestEffort' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota.
*/
framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func() {
	ginkgo.By("Creating a ResourceQuota with best effort scope")
	bestEffortQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring ResourceQuota status is calculated")
	usedResources := v1.ResourceList{v1.ResourcePods: resource.MustParse("0")}
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a ResourceQuota with not best effort scope")
	notBestEffortQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring ResourceQuota status is calculated")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)

	// setPods updates the expected pod count asserted against quota status.
	setPods := func(count string) {
		usedResources[v1.ResourcePods] = resource.MustParse(count)
	}

	ginkgo.By("Creating a best-effort pod")
	// NOTE(review): podName is not declared in this closure; it appears to
	// come from an enclosing scope — confirm its value at the declaration site.
	pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
	setPods("1")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with not best effort ignored the pod usage")
	setPods("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting the pod")
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released the pod usage")
	setPods("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Creating a not best-effort pod")
	// Explicit requests/limits make the pod Burstable, i.e. NotBestEffort.
	requests := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("200Mi"),
	}
	limits := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("1"),
		v1.ResourceMemory: resource.MustParse("400Mi"),
	}
	pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
	setPods("1")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage")
	setPods("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, bestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)

	ginkgo.By("Deleting the pod")
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring resource quota status released the pod usage")
	setPods("0")
	err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, notBestEffortQuota.Name, usedResources)
	framework.ExpectNoError(err)
})
/*
Release: v1.16
Testname: ResourceQuota, update and delete
Description: Create a ResourceQuota for CPU and Memory quota limits. Creation MUST be successful.
When ResourceQuota is updated to modify CPU and Memory quota limits, update MUST succeed with updated values for CPU and Memory limits.
When ResourceQuota is deleted, it MUST not be available in the namespace.
*/
framework.ConformanceIt("should be able to update and delete ResourceQuota.", func() {
	client := f.ClientSet
	ns := f.Namespace.Name

	// expectHard asserts the CPU and Memory hard limits carried by a quota.
	expectHard := func(rq *v1.ResourceQuota, cpu, memory string) {
		framework.ExpectEqual(rq.Spec.Hard[v1.ResourceCPU], resource.MustParse(cpu))
		framework.ExpectEqual(rq.Spec.Hard[v1.ResourceMemory], resource.MustParse(memory))
	}

	ginkgo.By("Creating a ResourceQuota")
	quotaName := "test-quota"
	resourceQuota := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: quotaName},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1"),
				v1.ResourceMemory: resource.MustParse("500Mi"),
			},
		},
	}
	_, err := createResourceQuota(client, ns, resourceQuota)
	framework.ExpectNoError(err)

	ginkgo.By("Getting a ResourceQuota")
	resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	expectHard(resourceQuotaResult, "1", "500Mi")

	ginkgo.By("Updating a ResourceQuota")
	resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2")
	resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi")
	resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota, metav1.UpdateOptions{})
	framework.ExpectNoError(err)
	expectHard(resourceQuotaResult, "2", "1Gi")

	ginkgo.By("Verifying a ResourceQuota was modified")
	resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	expectHard(resourceQuotaResult, "2", "1Gi")

	ginkgo.By("Deleting a ResourceQuota")
	err = deleteResourceQuota(client, ns, quotaName)
	framework.ExpectNoError(err)

	ginkgo.By("Verifying the deleted ResourceQuota")
	_, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
	framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
})
// Scope-selector variants of the BestEffort/Terminating quota tests: these
// quotas are built with newTestResourceQuotaWithScopeSelector, matching pods
// via scopeSelector expressions rather than a plain scopes list, and assert
// the same usage accounting against the quota status.
var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
f := framework.NewDefaultFramework("scope-selectors")
ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func() {
// Create one quota charging BestEffort pods and one charging NotBestEffort
// pods; both should settle at zero usage before any pod exists.
ginkgo.By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota with not best effort scope")
resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
framework.ExpectNoError(err)
// A pod with empty requests and limits is BestEffort QoS, so only the
// best-effort quota should be charged for it.
ginkgo.By("Creating a best-effort pod")
// NOTE(review): podName is not declared in this closure; presumably a
// package-level variable — confirm at its declaration site.
pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not best effort ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
framework.ExpectNoError(err)
// Explicit requests/limits make the next pod non-BestEffort, so only the
// not-best-effort quota should be charged.
ginkgo.By("Creating a not best-effort pod")
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func() {
// Create one quota charging Terminating pods (activeDeadlineSeconds set)
// and one charging NotTerminating pods; both start at zero usage.
ginkgo.By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a ResourceQuota with not terminating scope")
quotaNotTerminatingName := "quota-not-terminating"
resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
framework.ExpectNoError(err)
// A pod without activeDeadlineSeconds is NotTerminating; only that quota
// should capture its pod count and request/limit usage.
ginkgo.By("Creating a long running pod")
podName := "test-pod"
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
framework.ExpectNoError(err)
// Setting activeDeadlineSeconds places the next pod in the Terminating
// scope, so the charges move to the terminating quota.
ginkgo.By("Creating a terminating pod")
podName = "terminating-pod"
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
framework.ExpectNoError(err)
})
})
// E2E suite exercising ResourceQuota's PriorityClass scope: each spec creates
// a quota whose scopeSelector targets one or more priority classes, then
// verifies that only pods with a matching priorityClassName are counted
// against the quota (and that usage is released when pods are deleted).
var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
f := framework.NewDefaultFramework("resourcequota-priorityclass")
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() {
// PriorityClasses are cluster-scoped; tolerate leftovers from earlier runs.
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
ginkgo.By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass1"}))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a pod with priority class")
podName := "testpod-pclass1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
ginkgo.By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass2"}))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating first pod with priority class should pass")
podName := "testpod-pclass2-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
// The quota is exhausted (hard pods == 1), so admission must reject this pod.
ginkgo.By("Creating 2nd pod with priority class should fail")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{})
framework.ExpectError(err)
ginkgo.By("Deleting first pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
ginkgo.By("Creating a ResourceQuota with priority class scope")
// Deliberate mismatch: the quota selects "pclass4" while both pods below use
// "pclass3", so the quota's usage must stay at zero throughout.
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass4"}))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a pod with priority class with pclass3")
podName := "testpod-pclass3-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope remains same")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a 2nd pod with priority class pclass3")
// NOTE(review): "testpod-pclass2-2" looks like a copy-paste leftover from the
// previous spec; harmless since pod names only need to be unique per namespace.
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope remains same")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("2")
ginkgo.By("Creating a ResourceQuota with priority class scope")
// The ScopeSelectorOpIn value list may hold several classes; pods of either
// class must be counted against the same quota.
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass5", "pclass6"}))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a pod with priority class pclass5")
podName := "testpod-pclass5"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating 2nd pod with priority class pclass6")
podName2 := "testpod-pclass6"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("2")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
ginkgo.By("Creating a ResourceQuota with priority class scope")
// OpNotIn excludes the listed class, so a "pclass7" pod must NOT be counted.
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpNotIn, []string{"pclass7"}))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a pod with priority class pclass7")
podName := "testpod-pclass7"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class is not used")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
ginkgo.By("Creating a ResourceQuota with priority class scope")
// OpExists matches any pod that has some priority class, regardless of name.
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpExists, []string{}))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a pod with priority class pclass8")
podName := "testpod-pclass8"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
// This spec also constrains compute resources, not just pod count.
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
hard[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
hard[v1.ResourceLimitsCPU] = resource.MustParse("3")
hard[v1.ResourceLimitsMemory] = resource.MustParse("3Gi")
ginkgo.By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass9"}))
framework.ExpectNoError(err)
ginkgo.By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Creating a pod with priority class")
podName := "testpod-pclass9"
request := v1.ResourceList{}
request[v1.ResourceCPU] = resource.MustParse("1")
request[v1.ResourceMemory] = resource.MustParse("1Gi")
limit := v1.ResourceList{}
limit[v1.ResourceCPU] = resource.MustParse("2")
limit[v1.ResourceMemory] = resource.MustParse("2Gi")
pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("1")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("2")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("2Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
framework.ExpectNoError(err)
})
})
// newTestResourceQuotaWithScopeSelector returns a quota that enforces default
// constraints for testing with scopeSelectors. Every quota caps pod count;
// the (Not)Terminating scopes additionally cap compute resources.
func newTestResourceQuotaWithScopeSelector(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
	hard := v1.ResourceList{
		v1.ResourcePods: resource.MustParse("5"),
	}
	if scope == v1.ResourceQuotaScopeTerminating || scope == v1.ResourceQuotaScopeNotTerminating {
		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
		hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
		hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
		hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
	}
	// Express the scope as a scopeSelector (Exists operator) rather than the
	// legacy Scopes list.
	selector := &v1.ScopeSelector{
		MatchExpressions: []v1.ScopedResourceSelectorRequirement{
			{ScopeName: scope, Operator: v1.ScopeSelectorOpExists},
		},
	}
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ResourceQuotaSpec{
			Hard:          hard,
			ScopeSelector: selector,
		},
	}
}
// newTestResourceQuotaWithScope returns a quota that enforces default
// constraints for testing with scopes. Every quota caps pod count; the
// (Not)Terminating scopes additionally cap compute resources.
func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
	hard := v1.ResourceList{
		v1.ResourcePods: resource.MustParse("5"),
	}
	if scope == v1.ResourceQuotaScopeTerminating || scope == v1.ResourceQuotaScopeNotTerminating {
		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
		hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
		hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
		hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
	}
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ResourceQuotaSpec{
			Hard:   hard,
			Scopes: []v1.ResourceQuotaScope{scope},
		},
	}
}
// newTestResourceQuotaWithScopeForPriorityClass returns a quota
// that enforces the given hard limits, scoped by PriorityClass using the
// supplied operator (In/NotIn/Exists/DoesNotExist) and class-name values.
func newTestResourceQuotaWithScopeForPriorityClass(name string, hard v1.ResourceList, op v1.ScopeSelectorOperator, values []string) *v1.ResourceQuota {
	matchExpr := v1.ScopedResourceSelectorRequirement{
		ScopeName: v1.ResourceQuotaScopePriorityClass,
		Operator:  op,
		Values:    values,
	}
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ResourceQuotaSpec{
			Hard: hard,
			ScopeSelector: &v1.ScopeSelector{
				MatchExpressions: []v1.ScopedResourceSelectorRequirement{matchExpr},
			},
		},
	}
}
// newTestResourceQuota returns a quota that enforces default constraints for
// testing: object counts for core kinds, compute totals, storage totals, and
// a few special-case resources (storage-class scoped, discovered, extended).
func newTestResourceQuota(name string) *v1.ResourceQuota {
	hard := v1.ResourceList{
		v1.ResourcePods:                   resource.MustParse("5"),
		v1.ResourceServices:               resource.MustParse("10"),
		v1.ResourceServicesNodePorts:      resource.MustParse("1"),
		v1.ResourceServicesLoadBalancers:  resource.MustParse("1"),
		v1.ResourceReplicationControllers: resource.MustParse("10"),
		v1.ResourceQuotas:                 resource.MustParse("1"),
		v1.ResourceCPU:                    resource.MustParse("1"),
		v1.ResourceMemory:                 resource.MustParse("500Mi"),
		v1.ResourceConfigMaps:             resource.MustParse("2"),
		v1.ResourceSecrets:                resource.MustParse("10"),
		v1.ResourcePersistentVolumeClaims: resource.MustParse("10"),
		v1.ResourceRequestsStorage:        resource.MustParse("10Gi"),
		v1.ResourceEphemeralStorage:       resource.MustParse("50Gi"),
	}
	// Storage-class-scoped quotas use computed keys, so add them separately.
	hard[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
	hard[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
	// test quota on discovered resource type
	hard[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("5")
	// test quota on extended resource
	hard[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("3")
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.ResourceQuotaSpec{Hard: hard},
	}
}
// newTestPodForQuota returns a pod that has the specified requests and limits.
// The pod carries an unsatisfiable node selector so it never actually runs.
func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
	container := v1.Container{
		Name:  "pause",
		Image: imageutils.GetPauseImageName(),
		Resources: v1.ResourceRequirements{
			Requests: requests,
			Limits:   limits,
		},
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			// prevent disruption to other test workloads in parallel test runs by ensuring the quota
			// test pods don't get scheduled onto a node
			NodeSelector: map[string]string{
				"x-test.k8s.io/unsatisfiable": "not-schedulable",
			},
			Containers: []v1.Container{container},
		},
	}
}
// newTestPodForQuotaWithPriority returns a pod that has the specified
// requests, limits and priority class. The pod carries an unsatisfiable node
// selector so it never actually runs.
func newTestPodForQuotaWithPriority(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList, pclass string) *v1.Pod {
	container := v1.Container{
		Name:  "pause",
		Image: imageutils.GetPauseImageName(),
		Resources: v1.ResourceRequirements{
			Requests: requests,
			Limits:   limits,
		},
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			// prevent disruption to other test workloads in parallel test runs by ensuring the quota
			// test pods don't get scheduled onto a node
			NodeSelector: map[string]string{
				"x-test.k8s.io/unsatisfiable": "not-schedulable",
			},
			Containers:        []v1.Container{container},
			PriorityClassName: pclass,
		},
	}
}
// newTestPersistentVolumeClaimForQuota returns a simple persistent volume
// claim requesting 1Gi with all three access modes.
func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
	accessModes := []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
		v1.ReadWriteMany,
	}
	requests := v1.ResourceList{
		v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
	}
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: accessModes,
			Resources:   v1.ResourceRequirements{Requests: requests},
		},
	}
}
// newTestReplicationControllerForQuota returns a simple replication
// controller running `replicas` copies of `image`, selected by name label.
func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
	// Copy the parameter to a local so the spec can take its address.
	replicaCount := replicas
	template := &v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"name": name},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: name, Image: image},
			},
		},
	}
	return &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &replicaCount,
			Selector: map[string]string{"name": name},
			Template: template,
		},
	}
}
// newTestReplicaSetForQuota returns a simple replica set whose pods terminate
// immediately on deletion (zero grace period) so quota usage is released fast.
func newTestReplicaSetForQuota(name, image string, replicas int32) *appsv1.ReplicaSet {
	gracePeriod := int64(0)
	template := v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"name": name},
		},
		Spec: v1.PodSpec{
			TerminationGracePeriodSeconds: &gracePeriod,
			Containers: []v1.Container{
				{Name: name, Image: image},
			},
		},
	}
	return &appsv1.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: appsv1.ReplicaSetSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
			Template: template,
		},
	}
}
// newTestServiceForQuota returns a simple service of the given type exposing
// port 80.
func newTestServiceForQuota(name string, serviceType v1.ServiceType) *v1.Service {
	port := v1.ServicePort{
		Port:       80,
		TargetPort: intstr.FromInt(80),
	}
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ServiceSpec{
			Type:  serviceType,
			Ports: []v1.ServicePort{port},
		},
	}
}
// newTestConfigMapForQuota returns a minimal config map with one entry.
func newTestConfigMapForQuota(name string) *v1.ConfigMap {
	data := map[string]string{"a": "b"}
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data:       data,
	}
}
// newTestSecretForQuota returns a minimal secret with three opaque entries.
func newTestSecretForQuota(name string) *v1.Secret {
	data := map[string][]byte{
		"data-1": []byte("value-1\n"),
		"data-2": []byte("value-2\n"),
		"data-3": []byte("value-3\n"),
	}
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data:       data,
	}
}
// createResourceQuota in the specified namespace.
// Thin wrapper around the typed client; returns the server's representation
// of the created quota, or the API error.
func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) {
return c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), resourceQuota, metav1.CreateOptions{})
}
// deleteResourceQuota with the specified name.
// Thin wrapper around the typed client; returns the API error, if any.
func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
return c.CoreV1().ResourceQuotas(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
// countResourceQuota counts the number of ResourceQuota in the specified namespace
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
func countResourceQuota(c clientset.Interface, namespace string) (int, error) {
// found is captured by the poll closure below, so the int returned here
// reflects the last observed (stable) count when the poll finishes.
found, unchanged := 0, 0
return found, wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(context.TODO(), metav1.ListOptions{})
// A list failure aborts the whole test immediately rather than returning err.
framework.ExpectNoError(err)
if len(resourceQuotas.Items) == found {
// loop until the number of resource quotas has stabilized for 5 seconds
unchanged++
return unchanged > 4, nil
}
// Count changed: restart the stability window from this new value.
unchanged = 0
found = len(resourceQuotas.Items)
return false, nil
})
}
// waitForResourceQuota polls the named quota until its status reports exactly
// the expected used values for every resource in `used`, or the poll times
// out. A mismatch is logged and polling continues; API errors abort the wait.
func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
	return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
		quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if quota.Status.Used == nil {
			// Usage has not been calculated yet; keep polling.
			return false, nil
		}
		// Every expected resource must be present and match exactly.
		for name, want := range used {
			got, ok := quota.Status.Used[name]
			if ok && got.Cmp(want) == 0 {
				continue
			}
			framework.Logf("resource %s, expected %s, actual %s", name, want.String(), got.String())
			return false, nil
		}
		return true, nil
	})
}
// updateResourceQuotaUntilUsageAppears updates the resource quota object
// until the usage is populated for the specific resource name. Each poll that
// finds the usage missing bumps the hard limit by 1 to nudge the quota
// controller into recalculating; update conflicts are retried silently.
func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error {
	return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
		quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Done once the controller has published usage for this resource.
		if _, found := quota.Status.Used[resourceName]; found {
			return true, nil
		}
		hardValue := quota.Spec.Hard[resourceName]
		hardValue.Add(resource.MustParse("1"))
		quota.Spec.Hard[resourceName] = hardValue
		_, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), quota, metav1.UpdateOptions{})
		// ignoring conflicts since someone else may already updated it.
		if apierrors.IsConflict(err) {
			return false, nil
		}
		return false, err
	})
}
| tcnghia/kubernetes | test/e2e/apimachinery/resource_quota.go | GO | apache-2.0 | 88,449 |
package connpool
import (
"net"
"testing"
"github.com/Cloud-Foundations/Dominator/lib/resourcepool"
)
// serverAddress holds the "host:port" of a local TCP listener that every
// test in this file dials through the pool; it is populated once by init.
var serverAddress string
// init opens a listener on an ephemeral localhost port. The listener is
// deliberately kept open (and never accepts) for the lifetime of the test
// binary so that Dial attempts against serverAddress succeed.
func init() {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
panic(err)
}
serverAddress = listener.Addr().String()
//go http.Serve(listener, nil)
}
// TestGetUsePut exercises the normal lifecycle: Get a connection from the
// pool, use it while held, then return it with Put.
func TestGetUsePut(t *testing.T) {
	pool := New("tcp", serverAddress)
	if conn, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0); err != nil {
		t.Error(err)
	} else {
		conn.LocalAddr() // any method call is legal while the connection is held
		conn.Put()
	}
}
// TestGetClosePut verifies that closing a held connection before returning it
// with Put is a legal sequence.
func TestGetClosePut(t *testing.T) {
	pool := New("tcp", serverAddress)
	conn, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	if closeErr := conn.Close(); closeErr != nil {
		t.Error(closeErr)
	}
	conn.Put()
}
// TestGetPutPut verifies that returning the same connection twice panics.
func TestGetPutPut(t *testing.T) {
	pool := New("tcp", serverAddress)
	conn, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	conn.Put()
	// The second Put must panic; the deferred recover confirms it did.
	defer func() {
		if recover() == nil {
			t.Errorf("Multiple Put() did not panic")
		}
	}()
	conn.Put()
}
// TestUseAfterPut verifies that using a connection after returning it to the
// pool panics.
func TestUseAfterPut(t *testing.T) {
	pool := New("tcp", serverAddress)
	conn, err := pool.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	conn.Put()
	// Any use after Put must panic; the deferred recover confirms it did.
	defer func() {
		if recover() == nil {
			t.Errorf("Use after Put() did not panic")
		}
	}()
	conn.LocalAddr()
}
// TestUseAfterClose verifies that using a connection after Close() panics.
func TestUseAfterClose(t *testing.T) {
	cr := New("tcp", serverAddress)
	conn, err := cr.Get(resourcepool.MakeImmediateCanceler(), 0)
	if err != nil {
		t.Error(err)
		return
	}
	// Check the Close error (was silently ignored), consistent with
	// TestGetClosePut.
	if err := conn.Close(); err != nil {
		t.Error(err)
	}
	defer func() {
		if err := recover(); err == nil {
			t.Errorf("Use after Close() did not panic")
		}
	}()
	conn.LocalAddr()
}
| rgooch/Dominator | lib/connpool/usage_test.go | GO | apache-2.0 | 1,703 |
package app.dataTransportObject;
import app.viewObject.OrderVO;
// Data-transfer object for an order, constructed from its view-layer
// counterpart (OrderVO).
public class OrderDTO {
// NOTE(review): the orderVo argument is currently ignored and no fields are
// copied; presumably the VO-to-DTO field mapping is still to be implemented.
// Confirm intent before relying on this constructor.
public OrderDTO(OrderVO orderVo) {
super();
}
}
| pawel-nn/proj_app_bd_sem7 | ProjAppBD/src/main/java/app/dataTransportObject/OrderDTO.java | Java | apache-2.0 | 144 |
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
import (
"fmt"
"reflect"
)
// shell holder
// ParameterHandler groups the per-endpoint parameter-handling hooks below;
// it currently carries no state.
type ParameterHandler struct {
}
// GetInterfacesParams is a placeholder hook for shaping an interfaces
// response using the request parameters p. It currently returns s unchanged.
func (h *ParameterHandler) GetInterfacesParams(p map[string][]string, s []byte) []byte {
// some work should go here
return s
}
// GetBgpv4NeighborsParams is a placeholder hook for shaping a BGPv4-neighbors
// response using the request parameters p. It currently returns s unchanged.
func (h *ParameterHandler) GetBgpv4NeighborsParams(p map[string][]string, s []byte) []byte {
// some work should go here
return s
}
// GetIpv4RoutesParams is a placeholder hook for shaping an IPv4-routes
// response using the request parameters p. It currently returns s unchanged.
func (h *ParameterHandler) GetIpv4RoutesParams(p map[string][]string, s []byte) []byte {
// some work should go here
/*
tmp := "HEY THERE!"
s = append(s, tmp...)
*/
return s
}
func CallParameterHandlerFunc(c interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
function := reflect.ValueOf(c)
m := function.MethodByName(funcName)
if !m.IsValid() {
return make([]reflect.Value, 0), fmt.Errorf("Method not found \"%s\"\n", funcName)
}
in := make([]reflect.Value, len(params))
for i, param := range params {
in[i] = reflect.ValueOf(param)
}
out = m.Call(in)
return
}
| mslocrian/cuview | apis/parameterhandlers.go | GO | apache-2.0 | 1,535 |
/*
* Copyright (c) 2005-2013 Jyoti Parwatikar
* and Washington University in St. Louis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* File: HardwareGraphic.java
* Author: Jyoti Parwatikar
* Email: jp@arl.wustl.edu
* Organization: Washington University
*
* Derived from: none
*
* Date Created: 1/14/2008
*
* Description:
*
* Modification History:
*
*/
import java.awt.*;
import java.awt.geom.*;
import java.lang.Math;
import java.lang.String;
import java.awt.event.*;
import java.beans.PropertyChangeEvent;
import javax.swing.event.*;
import javax.swing.*;//JComponent;
//import javax.swing.JLabel;
import java.util.Vector;
import javax.xml.stream.*;
public class HardwareGraphic extends ONLGraphic
{
protected static final int TEST_HWGRAPHIC = 1;
protected static final int MAIN_BUTTON = -1;
protected static final int OFFSET = 5; //space between the inner and outer border of the component
protected static final int D_OFFSET = 12;//12; //2*OFFSET - the offset for the diameter
private MainButton mainButton = null; //circular button 1/3 diameter of switch representation
private Area mButtonArea = null; //area that defines mainButton space used for computing the area of the portButton
private PortButton portButtons[] = null;
private PortButton.PArea portAreas[] = null; //pie segments
private int numPorts = 8;
private boolean dimensionChange = true;
private boolean selected = false;
private Point2D.Double startOfDrag;
private TopButtonListener pButtonListener = null;
private TopButtonListener mButtonListener = null;
protected static final double MBUTTON_FRACTION = 0.4;
private ComponentLabel componentLabel = null;
private ComponentLabel userLabel = null;
private int originalW = 0;
private int originalH = 0;
//private double diameter = 0;
private Ellipse2D.Double borderEllipse = null;
private double radius = 0;
private double labelRadius = 0; //radius used to calculate where to draw the label on the port button
private double CPRadius = 0; //radius used to calculate where to put the port point on the cp port button
private SpinnerButton spinner = null;
private boolean graphicSelected = false;
public static interface HWButton
{
public boolean isPortButton();
public boolean isMainButton();
public void setPressed(boolean b) ;
public boolean isPressed();
}
/**
 * Small centered text label bound to an {@link ONLComponent}; used for the
 * hardware name and the user-supplied label drawn beneath/above the graphic.
 */
protected static class ComponentLabel extends JLabel implements ONLComponentButton
{
    private ONLComponent component = null;
    // Length of the current label text; read by HardwareGraphic.setSize to
    // estimate the pixel width the label needs.
    private int numChars = 0;
    public ComponentLabel(String lbl, ONLComponent c)
    {
        super(lbl);
        component = c;
        setFont(new Font("Dialog", Font.PLAIN, 9));
        setHorizontalAlignment(SwingConstants.CENTER);
        setHorizontalTextPosition(SwingConstants.CENTER);
        setForeground(Color.black);
        numChars = lbl.length();
    }
    public ONLComponent getONLComponent() { return component;}
    // Keep the cached character count in sync whenever the text changes.
    public void setText(String s) { numChars = s.length(); super.setText(s);}
    public int getNumChars() { return numChars;}
}//end class HardwareGraphic.ComponentLabel
protected static class SpinnerButton extends JComponent
{
private int curIndex = 0;
private boolean selected = false;
private HardwareGraphic routerGraphic;
private Ellipse2D.Double ellipse = null;
private static final double SRADIUS = 4; //radius of spinner
public SpinnerButton(HardwareGraphic nspg)
{
super();
setDoubleBuffered(true);
routerGraphic = nspg;
ellipse = new Ellipse2D.Double();
setCenter();
setOpaque(false);
setVisible(true);
}
public boolean contains(int dx, int dy)
{
return (ellipse.contains(dx,dy));
}
public int getIndex() { return curIndex;}
public void setIndex(int ndx)
{
int diff = HardwareGraphic.mod((ndx - curIndex), routerGraphic.numPorts);
if (diff > 0)
{
//ExpCoordinator.printer.print(new String("SpinnerButton.setIndex from " + curIndex + " to " + ndx), 10);
if ((HardwareGraphic.mod((curIndex + diff), routerGraphic.numPorts)) == ndx) routerGraphic.spinClockwise(diff);
else
{
routerGraphic.spinCClockwise(diff);
}
if (curIndex != ndx)
{
curIndex = ndx;
setCenter();
}
routerGraphic.revalidate();
routerGraphic.repaint();
}
}
public void setCenter()
{
if ((curIndex < routerGraphic.numPorts) && (curIndex >= 0))
{
//ExpCoordinator.printer.print("SpinnerButton::setCenter " + curIndex);
Point2D.Double cen_pnt = routerGraphic.getPortArea(curIndex).getConnectorPoint();
//Point2D.Double cen_pnt = (Point2D.Double)routerGraphic.getPortButton(curIndex).getCenter();
Point2D tmp_pnt = new Point2D.Double();
tmp_pnt.setLocation((cen_pnt.getX() - SRADIUS), (cen_pnt.getY() - SRADIUS));
ellipse.setFrameFromCenter(cen_pnt,tmp_pnt);
}
}
public boolean isSelected() { return selected;}
public void setSelected(boolean b)
{
if (selected != b)
{
selected = b;
resetColor();
}
}
public void resetColor()
{
if (selected) setBackground(routerGraphic.convertColor(Color.red));
else setBackground(routerGraphic.convertColor(Color.pink));
setCenter();
}
public void paintComponent(Graphics g)
{
super.paintComponent(g);
//ExpCoordinator.printer.print("SpinnerButton::paintComponent");
//setCenter();
Graphics2D g2 = (Graphics2D)g;
Color oldColor = g2.getColor();
g2.setColor(getBackground());
g2.fill(ellipse);
g2.setColor(routerGraphic.getForeground());
g2.draw(ellipse);
g2.setColor(oldColor);
}
}// inner class Spinner
private class SpinnerListener extends TopButtonListener //MouseInputAdapter
{
private HardwareGraphic routerGraphic = null;
private boolean selected = false;
private Point2D.Double startOfDrag = null;
private SpinnerButton spinner = null;
public SpinnerListener(HardwareGraphic nsp_g)
{
super();
routerGraphic = nsp_g;
spinner = nsp_g.spinner;
}
public void mousePressed(java.awt.event.MouseEvent e)
{
//ExpCoordinator.print(new String("HardwareGraphic.SpinnerListener.mousePressed point=" + e.getPoint() + " spinner=" + spinner.getLocation()), TEST_HWGRAPHIC);
if (spinner.contains(e.getPoint()))
{
selected = true;
startOfDrag = new Point2D.Double(e.getX(), e.getY());
spinner.setSelected(true);
routerGraphic.revalidate();
routerGraphic.repaint();
//spinner.repaint();
}
else super.mousePressed(e);
}
public void mouseReleased(java.awt.event.MouseEvent e)
{
if (selected)
{
selected = false;
startOfDrag = null;
//ExpCoordinator.printer.print("SpinnerReleased");
spinner.setSelected(false);
routerGraphic.revalidate();
routerGraphic.repaint();
//routerGraphic.printPorts();
}
else super.mouseReleased(e);
}
public void mouseDragged(java.awt.event.MouseEvent e)
{
//if not in this component do nothing
if (selected)
{
//convert x,y coords into double
double dx = (double)e.getX();
double dy = (double)e.getY();
//use for spinner
//if spinner moves to a new section clockwise or counter clockwise update picture
int spin_ndx = spinner.getIndex();
int tmp_ndx = spin_ndx;
for (int i = 0; i < numPorts; ++i)
{
if (routerGraphic.getPortArea(i).containsSpinner(dx,dy))
{
tmp_ndx = i;
if (tmp_ndx != spin_ndx) ExpCoordinator.print(new String("HardwareGraphic.SpinnerListener.mouseDragged index change from " + spin_ndx + " to " + tmp_ndx));
//break;
}
}
spinner.setIndex(tmp_ndx); //should set spinners new location
startOfDrag.setLocation(dx, dy);
routerGraphic.revalidate();
routerGraphic.repaint();
}
else super.mouseDragged(e);
}
} //inner class SpinnerListener
public static class MainButton extends JComponent implements HWButton, ONLComponentButton //Ellipse2D.Double
{
private HardwareGraphic routerGraphic = null;
private String mlabel;
private int labelOffsetX = -1;
private int labelOffsetY = -1;
private double labelX = 0;
private double labelY = 0;
private Ellipse2D.Double ellipse = null;
private boolean pressed = false;
private Point2D.Double center = null;
public MainButton(HardwareGraphic nspg, String lbl)
{
super();
setDoubleBuffered(true);
//ExpCoordinator.printer.print("MainButton::MainButton for " + lbl);
routerGraphic = nspg;
mlabel = lbl;
ellipse = new Ellipse2D.Double();
center = new Point2D.Double();
setOpaque(false);
setVisible(true);
setFont(new Font("Dialog", Font.PLAIN, 8));
addMouseListener(new MouseAdapter()
{
public void mousePressed(MouseEvent e)
{
setPressed(true);
}
public void mouseReleased(MouseEvent e)
{
setPressed(false);
}
});
}
public void setSize(double d)
{
this.setSize((int)d,(int)d);
Point p = getLocation();
ellipse.setFrame(p.getX(), p.getY(), d, d);
center.setLocation(d/2, d/2);
labelX = ellipse.getCenterX() - 9;
labelY = ellipse.getCenterY() + 2;
}
public void paintComponent(Graphics g)
{
super.paintComponent(g);
//ExpCoordinator.printer.print("MainButton::paintComponent");
Graphics2D g2 = (Graphics2D)g;
Color oldColor = g2.getColor();
Font oldFont = g2.getFont();
g2.setFont(routerGraphic.getFont());
//if (pressed)
//{
// g2.setColor(routerGraphic.getBackground().darker());
// g2.fill(ellipse);
//}
g2.setColor(routerGraphic.getForeground());
g2.draw(ellipse);
//drawLabel(g2);
g2.setColor(oldColor);
g2.setFont(oldFont);
}
public void drawLabel(Graphics2D g2)
{
//place label of switch id in middle of button
float fsize = 8;
g2.setFont(getFont());
g2.setColor(routerGraphic.getForeground());
g2.drawString(mlabel, (int)labelX, (int)labelY);
}
public Point2D getCenter()
{
//Point2D.Double rtn = new Point2D.Double( ellipse.getCenterX(), ellipse.getCenterY());
return center;
}
//interface HWButton
public boolean isPortButton() { return false;}
public boolean isMainButton() { return true;}
public void setPressed(boolean b)
{
if (pressed != b)
{
pressed = b;
routerGraphic.repaint();
}
}
public boolean isPressed() { return pressed;}
//end interface HWButton
public ONLComponent getONLComponent() { return routerGraphic.getONLComponent();}
}//inner class MainButton
public static class PortButton extends ONLGraphic implements HWButton //Arc2D.Double
{
private static final double CONNRADIUS = 3; //radius of port connector graphic
private String plabel;
private Ellipse2D.Double pconnector;
private boolean pressed = false;
private HardwareGraphic routerGraphic = null;
private PArea parea = null;
private int index;
private int numPorts;
private int portID = 0;
private int labelX = 0;
private int labelY = 0;
protected static class PArea extends Area
{
private int index;
private int numPorts;
private Arc2D.Double arc = null;
private Area area = null;
private Point2D.Double ppPoint = null;
private Point2D.Double cpPoint = null;
private Point2D.Double lblPoint = null;
private double labelTheta = 0; //angle needed to draw points
private HardwareGraphic routerGraphic = null;
public PArea(HardwareGraphic routerg, int ndx)
{
super();
index = ndx;
numPorts = routerg.numPorts;
arc = new Arc2D.Double(Arc2D.PIE);
numPorts = routerg.numPorts;
routerGraphic = routerg;
cpPoint = new Point2D.Double();
ppPoint = new Point2D.Double();
lblPoint = new Point2D.Double();
double ext = 360/numPorts;
arc.setAngleExtent(ext);
double ang_strt = 90 - (index * ext);// + 90;
arc.setAngleStart(ang_strt);
double labelDegrees = ang_strt + (ext/2);
labelTheta = Math.toRadians(labelDegrees);
}
public void setPoints(double x, double y, double rad)
{
//ExpCoordinator.printer.print("PortButton " + index + "::SetPoints center = (" + x + ", " + y + ") r = " + rad);
double tmp_x =(Math.cos(labelTheta)*rad) + x;
double tmp_y = y - (Math.sin(labelTheta)*rad);
ppPoint.setLocation(tmp_x, tmp_y);
//ExpCoordinator.printer.print(" ppPoint = (" + tmp_x + ", " + tmp_y);
double tmp_r = rad * MBUTTON_FRACTION;
tmp_x = (Math.cos(labelTheta)*tmp_r) + x;
tmp_y = y - (Math.sin(labelTheta)*tmp_r);
cpPoint.setLocation(tmp_x, tmp_y);
//ExpCoordinator.printer.print(" cpPoint = (" + tmp_x + ", " + tmp_y);
tmp_r = (rad * (MBUTTON_FRACTION + 1))/2;
tmp_x = (Math.cos(labelTheta)*tmp_r) + x;
tmp_y = y - (Math.sin(labelTheta)*tmp_r);
lblPoint.setLocation(tmp_x, tmp_y);
//ExpCoordinator.printer.print(" lblPoint = (" + tmp_x + ", " + tmp_y);
}
public void setSize(double d, Point loc)
{
Point p = loc;
ExpCoordinator.print(new String("PortButton.PArea setSize " + d + " location:(" + p.getX() + ", " + p.getY() + ") index = " + index), TEST_HWGRAPHIC);
//arc.setFrame((p.getX() + HardwareGraphic.OFFSET), (p.getY() + HardwareGraphic.OFFSET), d, d);
arc.setFrame(p.getX(), p.getY(), d, d);
reset();
add(new Area(arc));
subtract(routerGraphic.getMainButtonArea());
setPoints(arc.getCenterX(), arc.getCenterY(), (d/2));
}
public Point2D getLinkPoint()
{
Point loc = routerGraphic.getLocation();
Point tmp_p = new Point((int)ppPoint.getX(), (int)ppPoint.getY());
tmp_p.translate((int)loc.getX(), ((int)loc.getY() + D_OFFSET));
//ExpCoordinator.printer.print(" link point = ( " + tmp_p.getX() + ", " + tmp_p.getY() + ")");
return tmp_p;
}
protected boolean containsSpinner(double x, double y)
{
//boolean rtn = outerArc.contains(x,y);
Rectangle2D.Double rect = new Rectangle2D.Double();
Point2D endPoint = arc.getEndPoint();
Point2D strPoint = arc.getStartPoint();
rect.setFrameFromDiagonal(strPoint, endPoint);
if (rect.getHeight() < 1) rect.setRect(rect.getX(), (rect.getY()-SpinnerButton.SRADIUS), rect.getWidth(), (rect.getHeight() + (2*SpinnerButton.SRADIUS)));
if (rect.getWidth() < 1) rect.setRect((rect.getX()-SpinnerButton.SRADIUS), rect.getY(), (rect.getWidth() + (2*SpinnerButton.SRADIUS)), rect.getHeight());
boolean rtn = rect.contains(x,y);
if (index == 3)
ExpCoordinator.print(new String("HardwareGraphic.PortButton.PArea(" + index + ").containsSpinner (" + x + ", " + y + ")" + " start=" + strPoint + " end=" + endPoint + " rtn=" + rtn), TEST_HWGRAPHIC);
/*if (rtn)
{
ExpCoordinator.printer.print("PortButton (index,port) (" + index + ", " + port + ")::containsSpinner (" + x + ", " + y + ") " + rtn);
//ExpCoordinator.printer.print(" intersecting rect (" + tmp_x + ", " + tmp_y + ", " + tmp_w + ", " + tmp_h +")");
ExpCoordinator.printer.print(" with rect (" + rect.getX() + ", " + rect.getY() + ", " + rect.getWidth() + ", " + rect.getHeight() +")");
ExpCoordinator.printer.print(" ppPoint = (" + ppPoint.getX() + ", " + ppPoint.getY());
ExpCoordinator.printer.print(" cpPoint = (" + cpPoint.getX() + ", " + cpPoint.getY());
ExpCoordinator.printer.print(" lblPoint = (" + lblPoint.getX() + ", " + lblPoint.getY());
ExpCoordinator.printer.print(" startPoint = (" + strPoint.getX() + ", " + strPoint.getY());
ExpCoordinator.printer.print(" endPoint = (" + endPoint.getX() + ", " + endPoint.getY());
}*/
return rtn;
}
public Point2D.Double getConnectorPoint() { return (ppPoint);}
}
//end PortButton.PArea inner class
public PortButton(HardwareGraphic nspg, Hardware.Port p, int ndx)
{
super(p);
setDoubleBuffered(true);
portID = p.getID();
plabel = String.valueOf(portID);
numPorts = nspg.numPorts;
routerGraphic = nspg;
index = ndx;
//parea = routerGraphic.getPortArea(index);
pconnector = new Ellipse2D.Double();
//ExpCoordinator.printer.print("PortButton::PortButton " + ndx + " angleStart:" + ang_strt + " ext:" + ext + " labelDegrees:" + labelDegrees);
setOpaque(false);
setVisible(true);
addMouseListener(new MouseAdapter()
{
public void mousePressed(MouseEvent e)
{
if (!routerGraphic.spinner.contains(e.getPoint())) setPressed(true);
}
public void mouseReleased(MouseEvent e)
{
setPressed(false);
}
});
}
public Point2D.Double getConnectorPoint() { return (parea.ppPoint);}
public double getConnectorTheta() { return (parea.labelTheta);}
public Point2D getLinkPoint() { return (parea.getLinkPoint());}
public boolean contains(int x, int y) { return (parea.contains(x,y));}
public void incrementIndex(int i)
{
setIndex(HardwareGraphic.mod((index + i), numPorts));
}
public void decrementIndex(int i)
{
setIndex(HardwareGraphic.mod((index - i), numPorts));
}
public Hardware.Port getPort() { return ((Hardware.Port)getONLComponent());}
protected int getIndex() { return index;}
protected void setIndex(int ndx)
{
//if (index != ndx || parea == null)
// {
index = ndx;
setPArea();
}
public void setPArea()
{
parea = routerGraphic.getPortArea(index);
labelX = (int)(parea.lblPoint.getX() - 3);
labelY = (int)(parea.lblPoint.getY() + 7);
Point2D cen_pnt = parea.ppPoint;
Point2D tmp_pnt = new Point2D.Double();
if (portID == 0) cen_pnt = parea.cpPoint;
tmp_pnt.setLocation((cen_pnt.getX() - CONNRADIUS), (cen_pnt.getY() - CONNRADIUS));
pconnector.setFrameFromCenter(cen_pnt,tmp_pnt);
}
public void paintComponent(Graphics g)
{
super.paintComponent(g);
if (parea == null) setPArea();
//ExpCoordinator.printer.print("PortButton::paintComponent index " + index);
Graphics2D g2 = (Graphics2D)g;
Color oldColor = g2.getColor();
Font oldFont = g2.getFont();
g2.setFont(routerGraphic.getFont());
if (pressed)
{
g2.setColor(routerGraphic.getConvertedBG().darker());
g2.fill(parea);
}
g2.setColor(routerGraphic.getForeground());
g2.draw(parea);
drawLabel(g2);
drawPortConnector(g2);
if (portID == 0) routerGraphic.spinner.paintComponent(g);
g2.setColor(oldColor);
g2.setFont(oldFont);
}
public void drawLabel(Graphics2D g2)
{
g2.setColor(routerGraphic.getForeground());
g2.drawString(plabel, labelX, labelY);
}
public void drawPortConnector(Graphics2D g2)
{
g2.setColor(Color.black);//routerGraphic.getForeground());
g2.fill(pconnector);
}
public Point2D getCenter() { return (parea.lblPoint);}
//interface HWButton
public boolean isPortButton() { return true;}
public boolean isMainButton() { return false;}
public void setPressed(boolean b)
{
if (pressed != b)
{
//ExpCoordinator.printer.print("PortButton (index,port) (" + index + ", " + port + ")::setPressed");
pressed = b;
this.repaint();
}
}
public boolean isPressed() { return pressed;}
//end interface HWButton
public void addDragListener(MouseInputListener dListener){}
public void addComponentListener(ComponentListener cl)
{
super.addComponentListener(cl);
routerGraphic.addComponentListener(cl);
}
public void removeComponentListener(ComponentListener cl)
{
super.removeComponentListener(cl);
routerGraphic.removeComponentListener(cl);
}
private PArea getPArea()
{
return (parea);
}
protected void setPArea(PArea p) { parea = p;}
public int getScreenX() {
//ExpCoordinator.printer.print("PortButton::getScreenX " + routerGraphic.getLocation().getX());
return ((int)routerGraphic.getLocation().getX());}
public int getScreenY() { return ((int)routerGraphic.getLocation().getY());}
}
//inner class PortButton
public HardwareGraphic(Hardware sd, Color bcolor, double d)
{
this(sd, bcolor);
setSize(d);
}
public HardwareGraphic(Hardware sd, Color bcolor)
{
super(sd);
setDoubleBuffered(true);
numPorts = sd.getNumPorts();
ExpCoordinator.print(new String("HardwareGraphic::HardwareGraphic numPorts = " + numPorts), 2);
mButtonArea = new Area();
setForeground(Color.black);
setBackground(bcolor);
portButtons = new PortButton[numPorts];
portAreas = new PortButton.PArea[numPorts];
mainButton = new MainButton(this, onlComponent.getLabel());
int i = 0;
mainButton.addMouseListener(new MouseAdapter()
{
public void mousePressed(MouseEvent e)
{
setSelected();
}
});
mButtonListener = new TopButtonListener();
mButtonListener.setEnabled(true);
//mainButton.addMouseListener(getActivateListener());
mainButton.addMouseListener(mButtonListener);
mainButton.addMouseMotionListener(mButtonListener);
Hardware router = (Hardware)getONLComponent();
for (i = 0; i < numPorts; ++i)
{
portAreas[i] = new PortButton.PArea(this, i);
}
PortButton tmp_pb = null;
for (i = 0; i < numPorts; ++i)
{
ExpCoordinator.print(("HardwareGraphic.port " + i), 5);
tmp_pb = new PortButton(this, router.getPort(i), i);
//tmp_pb.setLocation(OFFSET, OFFSET);
//tmp_pb.addMouseListener(pButtonListener);
//tmp_pb.addMouseMotionListener(pButtonListener);
//add(tmp_pb);
portButtons[i] = tmp_pb;
}
//printPorts();
spinner = new SpinnerButton(this);
add(spinner,0);//add at the second position so it will be a top level component. i.e. the user can get to it
pButtonListener = new SpinnerListener(this);//TopButtonListener();
pButtonListener.setEnabled(true);
//SpinnerListener sl = new SpinnerListener(this);
//spinner.addMouseListener(sl);
//spinner.addMouseMotionListener(sl);
//addMouseListener(sl);
//addMouseMotionListener(sl);
for (i = 0; i < numPorts; ++i)
{
tmp_pb = portButtons[i];
//tmp_pb.addMouseListener(sl);
//tmp_pb.addMouseMotionListener(sl);
tmp_pb.addMouseListener(pButtonListener);
tmp_pb.addMouseMotionListener(pButtonListener);
add(tmp_pb);
}
add(mainButton,1);
borderEllipse = new Ellipse2D.Double();
setOpaque(false);
setFont(new Font("Dialog", Font.PLAIN, 11));
componentLabel = new ComponentLabel(sd.getLabel(),router);
componentLabel.addMouseListener(mButtonListener);
componentLabel.addMouseMotionListener(mButtonListener);
componentLabel.addMouseListener(new MouseAdapter()
{
public void mousePressed(MouseEvent e)
{
setSelected();
}
});
add(componentLabel);
userLabel = new ComponentLabel(sd.getUserLabel(),router);
userLabel.addMouseListener(mButtonListener);
userLabel.addMouseMotionListener(mButtonListener);
userLabel.setFont(new Font("Dialog", Font.BOLD, 10));
userLabel.addMouseListener(new MouseAdapter()
{
public void mousePressed(MouseEvent e)
{
setSelected();
}
});
add(userLabel);
repaint();
}
public void setSize(double d)
{
setSize((int)d, (int)d);
}
public void setSize(int w, int h)
{
originalW = w;
originalH = h;
int h2 = h - (2*D_OFFSET);
int d = w - D_OFFSET;
if (h2 < w) d = h2;//h2;//h - (2*D_OFFSET);
borderEllipse.setFrame(0, D_OFFSET, d, d);
int d3 = (int)(d/2 *(1 - MBUTTON_FRACTION));
userLabel.setLocation(0, 0);
//userLabel.setHorizontalAlignment(SwingConstants.LEFT);
int ulbl_w = userLabel.getNumChars() * 9;
int lbl_w = componentLabel.getNumChars() * 9;
if (ulbl_w > lbl_w) lbl_w = ulbl_w;
if (lbl_w > w)
{
super.setSize(lbl_w,h);
//float g_loc = (lbl_w - w)/8;
//ExpCoordinator.print("HostGraphic.setSize(" + w + "," + h + ") lbl_w:" + lbl_w + " g_loc:" + g_loc);
userLabel.setSize(lbl_w,D_OFFSET);
componentLabel.setSize(lbl_w,D_OFFSET);
}
else
{
super.setSize(w,h);
userLabel.setSize(d,D_OFFSET);
componentLabel.setSize(d,D_OFFSET);
}
ExpCoordinator.print(new String("HardwareGraphic::setSize " + w + " " + h +" d=" + d + " d3=" + d3), TEST_HWGRAPHIC);
//mainButton.setLocation((d3 + OFFSET), (d3 + OFFSET));
mainButton.setLocation(d3, (d3 + D_OFFSET));
mainButton.setSize(MBUTTON_FRACTION*d);
mButtonArea.reset();
Ellipse2D.Double mb_ellipse = new Ellipse2D.Double(mainButton.ellipse.getX(), (mainButton.ellipse.getY()-D_OFFSET), mainButton.ellipse.getWidth(), mainButton.ellipse.getHeight());
mButtonArea.add(new Area(mb_ellipse));//mainButton.ellipse));
PortButton elem = null;
for (int i = 0; i < numPorts; ++i)
{
elem = portButtons[i];
elem.setLocation(0, D_OFFSET);
Point elem_loc = new Point(0,0);//elem.getX(), (elem.getY() + D_OFFSET));
//Point elem_loc = elem.getLocation();
//elem.setLocation(elem_loc);
portAreas[i].setSize(d, elem_loc);
elem.setSize(d, d);
elem.setIndex(i);
}
spinner.setCenter();
spinner.revalidate();
componentLabel.setLocation(0, (d + D_OFFSET));
//componentLabel.setHorizontalAlignment(SwingConstants.LEFT);
revalidate();
repaint();
}
public void setUserLabel(String s)
{
userLabel.setText(s);
setSize(originalW, originalH);
}
/*original setSize without adjustment for label size*/
/*
public void setSize(int w, int h)
{
super.setSize(w,h);
int h2 = h - (2*D_OFFSET);
int d = w - D_OFFSET;
if (h2 < w) d = h2;//h2;//h - (2*D_OFFSET);
borderEllipse.setFrame(0, D_OFFSET, d, d);
int d3 = (int)(d/2 *(1 - MBUTTON_FRACTION));
userLabel.setLocation(0, 0);
userLabel.setSize(d,D_OFFSET);
ExpCoordinator.print(new String("HardwareGraphic::setSize " + w + " " + h +" d=" + d + " d3=" + d3), TEST_HWGRAPHIC);
//mainButton.setLocation((d3 + OFFSET), (d3 + OFFSET));
mainButton.setLocation(d3, (d3 + D_OFFSET));
mainButton.setSize(MBUTTON_FRACTION*d);
mButtonArea.reset();
Ellipse2D.Double mb_ellipse = new Ellipse2D.Double(mainButton.ellipse.getX(), (mainButton.ellipse.getY()-D_OFFSET), mainButton.ellipse.getWidth(), mainButton.ellipse.getHeight());
mButtonArea.add(new Area(mb_ellipse));//mainButton.ellipse));
PortButton elem = null;
for (int i = 0; i < numPorts; ++i)
{
elem = portButtons[i];
elem.setLocation(0, D_OFFSET);
Point elem_loc = new Point(0,0);//elem.getX(), (elem.getY() + D_OFFSET));
//Point elem_loc = elem.getLocation();
//elem.setLocation(elem_loc);
portAreas[i].setSize(d, elem_loc);
elem.setSize(d, d);
elem.setIndex(i);
}
spinner.setCenter();
spinner.revalidate();
componentLabel.setLocation(0, (d + D_OFFSET));
componentLabel.setSize(d,D_OFFSET);
revalidate();
repaint();
}*/
// Pie-segment geometry for the port at display index i.
private HardwareGraphic.PortButton.PArea getPortArea(int i)
{
    return (portAreas[i]);
}
// Port button by its creation index (matches the port id at construction).
public HardwareGraphic.PortButton getPortButton(int i)
{
    return (portButtons[i]);
}
// Port button for a specific hardware port, looked up by port id.
protected PortButton getPortButton(Hardware.Port p)
{
    //ExpCoordinator.print(new String("HardwareGraphic.getPortButton " + p.getID()), 2);
    return (portButtons[p.getID()]);}
// Convenience overload forwarding to setSize(int, int).
public void setSize(Dimension dim)
{
    setSize((int)dim.getWidth(), (int)dim.getHeight());
}
// Hit-test against the circular outline rather than the rectangular bounds.
public boolean contains(int x, int y)
{
    return (borderEllipse.contains(x, y));
}
/**
 * Rotates every port button c positions counter-clockwise, then revalidates
 * so the new layout takes effect.
 */
public void spinCClockwise(int c)
{
    for (PortButton button : portButtons)
    {
        button.decrementIndex(c);
    }
    revalidate();
}
/**
 * Rotates every port button c positions clockwise, then revalidates so the
 * new layout takes effect.
 */
public void spinClockwise(int c)
{
    for (PortButton button : portButtons)
    {
        button.incrementIndex(c);
    }
    revalidate();
}
/**
 * Paints the outer disc (filled background plus outline) and then the main
 * button; the port buttons paint themselves as child components via
 * super.paintComponent.  Color and font are saved and restored so the
 * shared Graphics context is left untouched.
 */
public void paintComponent(Graphics g)
{
    //ExpCoordinator.printer.print("HardwareGraphic::paintComponent", 2);
    Graphics2D g2 = (Graphics2D)g;
    Color oldColor = g2.getColor();
    Font oldFont = g2.getFont();
    g2.setFont(getFont());
    //if (graphicSelected) g2.setColor(getBackground().darker());
    //else g2.setColor(getBackgound());
    g2.setColor(getConvertedBG());
    g2.fill(borderEllipse);
    g2.setColor(getForeground());
    g2.draw(borderEllipse);
    // Restore the caller's graphics state before painting children.
    g2.setColor(oldColor);
    g2.setFont(oldFont);
    mainButton.paintComponent(g);
    super.paintComponent(g);
}
public void addPortListener(ONLGraphic.ButtonListener l)
{
pButtonListener.addAction(l);
}
public void removePortListener(ONLGraphic.ButtonListener l)
{
pButtonListener.removeAction(l);
}
public void addNodeListener(ONLGraphic.ButtonListener l)
{
//ExpCoordinator.print(new String("HardwareGraphic(" + onlComponent.getLabel() + ").addNodeListener " + l.toString()), ExpCompare.TEST_CMP);
mButtonListener.addAction(l);
}
public void removeNodeListener(ONLGraphic.ButtonListener l) { mButtonListener.removeAction(l);}
protected Area getMainButtonArea() { return (mButtonArea);}
public boolean isSpinning() { return spinner.isSelected();}
public int getSpinnerPosition() { return (spinner.getIndex());}
public void setSpinnerPosition(int i) { spinner.setIndex(i);}
/**
 * Non-negative modulo used to wrap port indices around the ring of
 * numPorts segments.  Equivalent to {@link Math#floorMod(int, int)} for
 * m &gt; 0.  The original hand-rolled recursion never terminated for
 * m == 0 with x != 0 (and for negative m); those degenerate cases now
 * return 0 instead of overflowing the stack.
 *
 * @param x value to reduce (may be negative)
 * @param m modulus (number of ports; normally &gt; 0)
 * @return x modulo m in the range [0, m) for m &gt; 0; 0 when m == 0
 */
public static int mod(int x, int m)
{
    if (m == 0) return 0; // degenerate modulus; original only handled x == 0
    return Math.floorMod(x, m);
}
/** Debug helper: dumps each button's (index, port) pair to the printer. */
public void printPorts()
{
    for (PortButton button : portButtons)
    {
        ExpCoordinator.printer.print("PortButton (index,port) (" + button.getIndex() + ", " + button.getPort() + ")");
    }
}
public void revalidate()
{
super.revalidate();
if ((mainButton != null) &&
(portButtons != null) &&
(spinner != null))
{
for (int i = 0; i < numPorts; ++i)
{
portButtons[i].revalidate();
}
mainButton.revalidate();
spinner.revalidate();
}
if (componentLabel != null) componentLabel.revalidate();
}
/**
 * Toggles this graphic's selection state.  (Earlier revisions also forwarded
 * the state to the physical component backing a virtual-topology node; that
 * code was already disabled.)
 */
public void setSelected()
{
    graphicSelected = !graphicSelected;
}
public void addComponentListener(ComponentListener cl)
{
super.addComponentListener(cl);
spinner.addComponentListener(cl);
}
public void addDragListener(ONLGraphic.ButtonListener dListener)
{
mainButton.addMouseListener(dListener);
mainButton.addMouseMotionListener(dListener);
componentLabel.addMouseListener(dListener);
componentLabel.addMouseMotionListener(dListener);
//addNodeListener(dListener);
}
public void removeDragListener(ONLGraphic.ButtonListener dListener)
{
mainButton.removeMouseListener(dListener);
mainButton.removeMouseMotionListener(dListener);
componentLabel.removeMouseListener(dListener);
componentLabel.removeMouseMotionListener(dListener);
//removeNodeListener(dListener);
}
protected void setStateChange(String st) //override from ONLGraphic to disable clicks and change colors
{
boolean b = (st.equals(ONLComponent.ACTIVE) ||
st.equals(ONLComponent.WAITING) ||
st.equals(ONLComponent.IN1) ||
st.equals(ONLComponent.INBOTH) ||
st.equals(ONLComponent.IN2));
//ExpCoordinator.printer.print("HardwareGraphic::setStateChange " + st + " " + b);
mButtonListener.setEnabled(b);
pButtonListener.setEnabled(b);
spinner.resetColor();
}
//public Color getBackground() { return (getConvertedBG());}
public void writeXML(XMLStreamWriter xmlWrtr) throws XMLStreamException
{
super.writeXML(xmlWrtr);
xmlWrtr.writeStartElement(ExperimentXML.SPINNER);
xmlWrtr.writeCharacters(String.valueOf(getSpinnerPosition()));
xmlWrtr.writeEndElement();
}
public void propertyChange(PropertyChangeEvent e)
{
if (e.getSource() == onlComponent)
{
if (e.getPropertyName().equals(ExperimentXML.USER_LABEL))
{
userLabel.setText((String)e.getNewValue());
//setSize(getWidth(), getHeight());
}
super.propertyChange(e);
revalidate();
}
}
}
| WU-ARL/RLI | HardwareGraphic.java | Java | apache-2.0 | 32,220 |
/*
* Copyright 2011-2012 Gregory P. Moyer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.syphr.mythtv.db.schema.impl._0_24;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import org.syphr.mythtv.db.schema.RecordedProgramId;
@Embeddable
public class RecordedProgramId1264 implements RecordedProgramId
{
    /**
     * Serialization ID
     */
    private static final long serialVersionUID = 1L;

    // Composite primary key columns for a recorded program row.
    @Column(name = "chanid", nullable = false)
    private int chanid;

    @Column(name = "starttime", nullable = false, length = 19)
    private Date starttime;

    @Column(name = "manualid", nullable = false)
    private int manualid;

    @Override
    public int getChanid()
    {
        return chanid;
    }

    @Override
    public void setChanid(int chanid)
    {
        this.chanid = chanid;
    }

    @Override
    public Date getStarttime()
    {
        return starttime;
    }

    @Override
    public void setStarttime(Date starttime)
    {
        this.starttime = starttime;
    }

    @Override
    public int getManualid()
    {
        return manualid;
    }

    @Override
    public void setManualid(int manualid)
    {
        this.manualid = manualid;
    }

    /**
     * Two ids are equal when all three key columns match; starttime is
     * compared null-safely (both null counts as equal).
     */
    @Override
    public boolean equals(Object other)
    {
        if (this == other)
        {
            return true;
        }
        // instanceof is false for null, so no separate null check is needed.
        if (!(other instanceof RecordedProgramId1264))
        {
            return false;
        }

        RecordedProgramId that = (RecordedProgramId)other;
        if (getChanid() != that.getChanid() || getManualid() != that.getManualid())
        {
            return false;
        }

        Date thisStart = getStarttime();
        Date thatStart = that.getStarttime();
        return thisStart == thatStart
               || (thisStart != null && thisStart.equals(thatStart));
    }

    /**
     * Hash combines the three key columns with the conventional 17/37 scheme
     * (same values as before, consistent with equals()).
     */
    @Override
    public int hashCode()
    {
        int result = 17;
        result = 37 * result + getChanid();
        result = 37 * result + (getStarttime() == null ? 0 : getStarttime().hashCode());
        result = 37 * result + getManualid();
        return result;
    }

    @Override
    public String toString()
    {
        return "RecordedProgramId1264 [chanid=" + chanid
               + ", starttime=" + starttime
               + ", manualid=" + manualid
               + "]";
    }
}
| syphr42/libmythtv-java | db/src/main/java/org/syphr/mythtv/db/schema/impl/_0_24/RecordedProgramId1264.java | Java | apache-2.0 | 3,214 |
/*
* File: app/controller/footerController.js
*/
Ext.define('webapp.controller.footerController', {
    extend: 'Ext.app.Controller',

    refs: {
        footerLabel2: '#footerLabel2'
    },

    onLaunch: function() {
        // The map window lives in this closure so it is created on the first
        // click and re-shown on later clicks (the window uses
        // closeAction: 'hide', so the instance survives being closed).
        // BUG FIX: `mapwin` was previously declared inside the click handler,
        // so it was always undefined and a brand-new window was created on
        // every click, defeating the reuse logic below.
        var mapwin;

        // Attach a click listener to the footer address label.
        this.getFooterLabel2().getEl().on('click', function() {
            if (mapwin) {
                mapwin.show();
            } else {
                mapwin = Ext.create('Ext.window.Window', {
                    autoShow: true,
                    layout: 'fit',
                    title: 'OSCI Location',
                    closeAction: 'hide',
                    width: 600,
                    height: 500,
                    border: true,
                    x: 40,
                    y: 60,
                    items: {
                        xtype: 'gmappanel',
                        center: {
                            geoCodeAddr: '서울특별시 서초구 서초2동 1337'
                        },
                        markers: [{
                            lat: 37.492359,
                            lng: 127.028590,
                            title: 'Gangnam Mirae Tower 805, Saimdang-ro 174(Seocho-dong), Seocho-gu, Seoul, Korea',
                            listeners: {
                                click: function(e){
                                    Ext.Msg.alert('Address', 'Gangnam Mirae Tower 805, Saimdang-ro 174(Seocho-dong), Seocho-gu, Seoul, Korea');
                                }
                            }
                        }]
                    }
                });
            }
        });

        // Add below script to index.html manually
        // <script type="text/javascript" src="http://maps.google.com/maps/api/js?sensor=false"></script>
    }
});
| OpenSourceConsulting/athena-meerkat | console/app/controller/footerController.js | JavaScript | apache-2.0 | 1,951 |
/*
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl1.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.kra.irb.actions.notification;
import org.apache.struts.upload.FormFile;
import org.jmock.Expectations;
import org.jmock.Mockery;
import org.jmock.integration.junit4.JUnit4Mockery;
import org.jmock.lib.concurrent.Synchroniser;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.kuali.kra.infrastructure.Constants;
import org.kuali.rice.krad.util.GlobalVariables;
import org.kuali.rice.krad.util.MessageMap;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Tests for {@link ProtocolNotificationTemplateRule}: verifies that replacing
 * a protocol notification template passes validation for an accepted content
 * type and fails (with exactly one error) for a rejected one.
 */
public class ProtocolNotificationTemplateRuleTest {
    // jMock context; Synchroniser allows safe use across threads.
    Mockery context = new JUnit4Mockery() {{ setThreadingPolicy(new Synchroniser()); }};
    // Mocked uploaded file, re-created for each test in setUp().
    FormFile mockedFile = null;
    @Before
    public void setUp() throws Exception {
        mockedFile = this.context.mock(FormFile.class);
        // Clear any error messages that may have been created in prior tests.
        MessageMap messageMap = GlobalVariables.getMessageMap();
        messageMap.clearErrorMessages();
    }
    @After
    public void tearDown() throws Exception {
        mockedFile = null;
    }
    /**
     * Replacing a notification template with a file of an accepted content
     * type should pass the rule and record no errors.
     *
     * @throws Exception
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testReplaceNotificationTemplateOK() throws Exception {
        simulateValidMockedFileBehavior(Constants.CORRESPONDENCE_TEMPLATE_CONTENT_TYPE_1);
        ProtocolNotificationTemplate template = new ProtocolNotificationTemplate();
        template.setActionTypeCode("116");
        template.setFileName("notifyirb.xsl");
        template.setNotificationTemplate(new byte[] { (byte) 1, (byte) 2, (byte) 3 });
        template.setTemplateFile(mockedFile);
        int index = 2;
        boolean rulePassed = new ProtocolNotificationTemplateRule()
            .processReplaceProtocolNotificationTemplateRules(template, index);
        assertTrue(rulePassed);
        /*
         * There should be no errors.
         */
        MessageMap messageMap = GlobalVariables.getMessageMap();
        assertEquals(0, messageMap.getErrorCount());
    }
    /**
     * Replacing a notification template with a rejected content type (pdf)
     * should fail the rule and record exactly one error.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testReplaceNotificationTemplateNotOK() throws Exception {
        simulateValidMockedFileBehavior("pdf");
        ProtocolNotificationTemplate template = new ProtocolNotificationTemplate();
        template.setActionTypeCode("116");
        template.setFileName("test.pdf");
        template.setNotificationTemplate(new byte[] {});
        template.setTemplateFile(mockedFile);
        int index = 2;
        boolean rulePassed = new ProtocolNotificationTemplateRule()
            .processReplaceProtocolNotificationTemplateRules(template, index);
        Assert.assertFalse(rulePassed);
        /*
         * Exactly one validation error is expected for the rejected content type.
         * (Earlier comment said "no errors", which contradicted the assertion.)
         */
        MessageMap messageMap = GlobalVariables.getMessageMap();
        assertEquals(1, messageMap.getErrorCount());
    }
    // Stubs the mocked upload to report the given content type and a small
    // fixed byte payload.
    private void simulateValidMockedFileBehavior(final String contentType) throws IOException {
        this.context.checking(new Expectations() {
            {
                allowing(mockedFile).getContentType();
                will(returnValue(contentType));
                allowing(mockedFile).getFileData();
                will(returnValue(new byte[] { (byte) 1, (byte) 2, (byte) 3 }));
            }
        });
    }
}
| blackcathacker/kc.preclean | coeus-code/src/test/java/org/kuali/kra/irb/actions/notification/ProtocolNotificationTemplateRuleTest.java | Java | apache-2.0 | 4,135 |
/*
* Copyright 2005-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.osedu.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.coeus.common.budget.framework.core;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import org.kuali.coeus.common.budget.framework.personnel.*;
import org.kuali.coeus.common.framework.rolodex.PersonRolodex;
import org.kuali.coeus.common.framework.version.VersionStatus;
import org.kuali.coeus.common.framework.version.history.VersionHistory;
import org.kuali.coeus.common.framework.version.history.VersionHistoryService;
import org.kuali.coeus.propdev.impl.budget.ProposalBudgetStatusService;
import org.kuali.coeus.propdev.impl.core.ProposalDevelopmentDocument;
import org.kuali.coeus.sys.framework.controller.StrutsConfirmation;
import org.kuali.coeus.sys.framework.service.KcServiceLocator;
import org.kuali.coeus.sys.framework.workflow.KcDocumentRejectionService;
import org.kuali.kra.award.document.AwardDocument;
import org.kuali.kra.award.home.Award;
import org.kuali.kra.award.home.ContactRole;
import org.kuali.coeus.common.budget.framework.calculator.BudgetCalculationService;
import org.kuali.coeus.common.budget.framework.distribution.BudgetDistributionService;
import org.kuali.coeus.common.budget.framework.core.category.BudgetCategoryTypeValuesFinder;
import org.kuali.coeus.common.budget.framework.nonpersonnel.BudgetLineItem;
import org.kuali.coeus.common.budget.framework.nonpersonnel.BudgetLineItemCalculatedAmount;
import org.kuali.coeus.common.budget.framework.period.BudgetPeriod;
import org.kuali.coeus.common.budget.framework.rate.BudgetRatesService;
import org.kuali.coeus.common.budget.framework.lock.BudgetLockService;
import org.kuali.coeus.common.budget.framework.summary.BudgetSummaryService;
import org.kuali.coeus.common.budget.framework.version.BudgetDocumentVersion;
import org.kuali.coeus.common.budget.framework.version.BudgetVersionOverview;
import org.kuali.kra.infrastructure.Constants;
import org.kuali.kra.infrastructure.KeyConstants;
import org.kuali.coeus.common.framework.print.AttachmentDataSource;
import org.kuali.coeus.propdev.impl.core.DevelopmentProposal;
import org.kuali.coeus.propdev.impl.budget.modular.BudgetModularService;
import org.kuali.coeus.common.budget.framework.print.BudgetPrintService;
import org.kuali.coeus.propdev.impl.budget.subaward.PropDevBudgetSubAwardService;
import org.kuali.coeus.propdev.impl.hierarchy.ProposalHierarcyActionHelper;
import org.kuali.rice.core.api.util.KeyValue;
import org.kuali.rice.coreservice.framework.parameter.ParameterService;
import org.kuali.rice.kew.api.KewApiConstants;
import org.kuali.rice.kew.api.exception.WorkflowException;
import org.kuali.rice.kns.authorization.AuthorizationConstants;
import org.kuali.rice.kns.datadictionary.HeaderNavigation;
import org.kuali.rice.kns.datadictionary.KNSDocumentEntry;
import org.kuali.rice.kns.question.ConfirmationQuestion;
import org.kuali.rice.kns.service.DataDictionaryService;
import org.kuali.rice.kns.util.KNSGlobalVariables;
import org.kuali.rice.kns.util.WebUtils;
import org.kuali.rice.kns.web.struts.form.KualiDocumentFormBase;
import org.kuali.rice.kns.web.struts.form.KualiForm;
import org.kuali.rice.krad.rules.rule.event.DocumentAuditEvent;
import org.kuali.rice.krad.service.DocumentService;
import org.kuali.rice.krad.service.KualiRuleService;
import org.kuali.rice.krad.service.PessimisticLockService;
import org.kuali.rice.krad.util.GlobalVariables;
import org.kuali.rice.krad.util.KRADConstants;
import org.kuali.rice.krad.util.ObjectUtils;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class BudgetAction extends BudgetActionBase {
private static final Log LOG = LogFactory.getLog(BudgetAction.class);
private static final String DOCUMENT_REJECT_QUESTION="DocReject";
protected static final String CONFIRM_SYNCH_BUDGET_RATE = "confirmSynchBudgetRate";
protected static final String NO_SYNCH_BUDGET_RATE = "noSynchBudgetRate";
protected static final String CONFIRM_SYNCH_AWARD_RATES = "confirmSynchAwardRates";
protected static final String NO_SYNCH_AWARD_RATES = "noSynchAwardRates";
private ProposalHierarcyActionHelper hierarchyHelper;
@Override
public ActionForward docHandler(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
ActionForward forward = super.docHandler(mapping, form, request, response);
BudgetForm budgetForm = (BudgetForm) form;
if (KewApiConstants.INITIATE_COMMAND.equals(budgetForm.getCommand())) {
budgetForm.getBudgetDocument().initialize();
}else{
budgetForm.initialize();
}
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
if (budgetDocument.isBudgetDeleted()) {
return mapping.findForward("deleted");
}
Budget budget = budgetDocument.getBudget();
copyLineItemToPersonnelDetails(budgetDocument);
if (budget.getActivityTypeCode().equals("x")) {
budget.setActivityTypeCode(KcServiceLocator.getService(BudgetService.class).getActivityTypeForBudget(budgetDocument));
}
if(budget.getOhRateClassCode()!=null && ((BudgetForm)KNSGlobalVariables.getKualiForm())!=null){
((BudgetForm)KNSGlobalVariables.getKualiForm()).setOhRateClassCodePrevValue(budget.getOhRateClassCode());
}
if(budget.getUrRateClassCode()!=null && ((BudgetForm)KNSGlobalVariables.getKualiForm())!=null){
((BudgetForm)KNSGlobalVariables.getKualiForm()).setUrRateClassCodePrevValue(budget.getUrRateClassCode());
}
if (isAwardBudget(budgetDocument) && StringUtils.isNotBlank(budgetForm.getSyncBudgetRate()) && budgetForm.getSyncBudgetRate().equals("Y")) {
getBudgetRatesService().syncParentDocumentRates(budget);
getBudgetCommonService(budget.getBudgetParent()).recalculateBudget(budget);
}
reconcileBudgetStatus(budgetForm);
if ("Personnel".equals(budgetForm.getActivePanelName())) {
forward = personnel(mapping, budgetForm, request, response);
}
return forward;
}
    /**
     * Builds the confirmation question asking whether award budget rates
     * should be synchronized, keyed by CONFIRM_SYNCH_AWARD_RATES.
     */
    protected StrutsConfirmation syncAwardBudgetRateConfirmationQuestion(ActionMapping mapping, ActionForm form,
            HttpServletRequest request, HttpServletResponse response, String message) throws Exception {
        return buildParameterizedConfirmationQuestion(mapping, form, request, response, CONFIRM_SYNCH_AWARD_RATES,
                message, "");
    }
    // User answered "yes" to the award-rate sync question: redirect with sync enabled.
    public ActionForward confirmSynchAwardRates(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
        return synchAwardBudgetRate(mapping, form, request, response, true);
    }
    // User answered "no" to the award-rate sync question: redirect without syncing.
    public ActionForward noSynchAwardRates(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
        return synchAwardBudgetRate(mapping, form, request, response, false);
    }
    /**
     * Redirects back to the award budget parameters page; when confirm is true
     * the URL carries syncBudgetRate=Y, which docHandler() reads to trigger a
     * rate re-sync with the parent award.
     */
    private ActionForward synchAwardBudgetRate(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response, boolean confirm) throws Exception {
        BudgetForm budgetForm = (BudgetForm) form;
        BudgetDocument budgetDoc = budgetForm.getBudgetDocument();
        String routeHeaderId = budgetDoc.getDocumentHeader().getWorkflowDocument().getDocumentId();
        String forward = buildForwardUrl(routeHeaderId);
        if (confirm) {
            forward = forward.replace("awardBudgetParameters.do?", "awardBudgetParameters.do?syncBudgetRate=Y&");
        }
        return new ActionForward(forward, true);
    }
    /**
     * Returns true when this budget belongs to an award (i.e. the parent
     * document's proposal-budget flag is not "true"); false for proposal
     * budgets.
     *
     * @param budgetDocument the budget document to classify
     * @return true for an award budget, false for a proposal budget
     */
    protected boolean isAwardBudget(BudgetDocument budgetDocument) {
        return !Boolean.parseBoolean(budgetDocument.getBudget().getBudgetParent().getDocument().getProposalBudgetFlag());
    }
    // Service-locator accessor for the budget rates service.
    private BudgetRatesService<BudgetParent> getBudgetRatesService() {
        return KcServiceLocator.getService(BudgetRatesService.class);
    }
public List<HeaderNavigation> getBudgetHeaderNavigatorList(){
DataDictionaryService dataDictionaryService = (DataDictionaryService) KcServiceLocator.getService(Constants.DATA_DICTIONARY_SERVICE_NAME);
KNSDocumentEntry docEntry = (KNSDocumentEntry) dataDictionaryService.getDataDictionary().getDocumentEntry(BudgetDocument.class.getName());
return docEntry.getHeaderNavigationList();
}
/**
* Need to suppress buttons here when 'Totals' tab is clicked.
*/
@Override
public ActionForward execute(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
final BudgetForm budgetForm = (BudgetForm) form;
if(budgetForm.getMethodToCall().equals("close")){
setupDocumentExit();
}
ActionForward actionForward = null;
actionForward = super.execute(mapping, budgetForm, request, response);
if (actionForward != null) {
if ("summaryTotals".equals(actionForward.getName())) {
budgetForm.suppressButtonsForTotalPage();
}
}
// check if audit rule check is done from PD
if (budgetForm.isAuditActivated() && !"route".equals(((KualiForm)form).getMethodToCall())) {
KcServiceLocator.getService(KualiRuleService.class).applyRules(new DocumentAuditEvent(budgetForm.getBudgetDocument()));
}
return actionForward;
}
    /**
     * Recalculates the budget, delegates the save, then runs total-direct-cost
     * (TDC) validation against the parent document -- as errors when heading to
     * the versions page, otherwise as warnings. When the save came from an
     * activated audit, forwards to the budget actions page instead.
     */
    @Override
    public ActionForward save(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
        BudgetForm budgetForm = (BudgetForm) form;
        final BudgetDocument budgetDoc = budgetForm.getBudgetDocument();
        Budget budget = budgetDoc.getBudget();
        getBudgetCommonService(budget.getBudgetParent()).calculateBudgetOnSave(budget);
        ActionForward forward = super.save(mapping, form, request, response);
        BudgetForm savedBudgetForm = (BudgetForm) form;
        BudgetDocument savedBudgetDoc = savedBudgetForm.getBudgetDocument();
        final BudgetTDCValidator tdcValidator = new BudgetTDCValidator(request);
        if (budgetForm.toBudgetVersionsPage()
                || "BudgetVersionsAction".equals(budgetForm.getActionName())) {
            GlobalVariables.getMessageMap().addToErrorPath(KRADConstants.DOCUMENT_PROPERTY_NAME + ".proposal");
            tdcValidator.validateGeneratingErrorsAndWarnings(budgetDoc.getBudget().getBudgetParent().getDocument());
        } else {
            tdcValidator.validateGeneratingWarnings(budgetDoc.getBudget().getBudgetParent().getDocument());
        }
        if (budgetForm.getMethodToCall().equals("save") && budgetForm.isAuditActivated()) {
            forward = mapping.findForward("budgetActions");
        }
        return forward;
    }
    // Service-locator accessor for the budget summary service.
    protected BudgetSummaryService getBudgetSummaryService() {
        return KcServiceLocator.getService(BudgetSummaryService.class);
    }
    /**
     * Reloads the document, then refreshes form-level budget state
     * (final version, statuses, TDC warnings, print forms).
     */
    @Override
    public ActionForward reload(ActionMapping mapping, ActionForm form,
            HttpServletRequest request, HttpServletResponse response)
            throws Exception {
        final ActionForward forward = super.reload(mapping, form, request, response);
        updateBudgetAttributes(form, request);
        return forward;
    }
    /**
     * Same as reload() but without the confirmation warning; refreshes the
     * same form-level budget state afterwards.
     */
    @Override
    public ActionForward reloadWithoutWarning(ActionMapping mapping, ActionForm form,
            HttpServletRequest request, HttpServletResponse response)
            throws Exception {
        final ActionForward forward = super.reloadWithoutWarning(mapping, form, request, response);
        updateBudgetAttributes(form, request);
        return forward;
    }
    /**
     * Refreshes derived budget state on the form after a (re)load: the final
     * budget version, budget statuses, TDC warnings against the parent
     * document, and the budget print forms.
     */
    @SuppressWarnings("rawtypes")
    protected void updateBudgetAttributes(ActionForm form, HttpServletRequest request) {
        final BudgetForm budgetForm = (BudgetForm) form;
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        BudgetParentDocument parentDocument = budgetDocument.getBudget().getBudgetParent().getDocument();
        budgetForm.setFinalBudgetVersion(getFinalBudgetVersion(parentDocument.getBudgetDocumentVersions()));
        setBudgetStatuses(budgetDocument.getBudget().getBudgetParent());
        final BudgetTDCValidator tdcValidator = new BudgetTDCValidator(request);
        tdcValidator.validateGeneratingWarnings(budgetDocument.getBudget().getBudgetParent().getDocument());
        populateBudgetPrintForms(budgetDocument.getBudget());
    }
    /**
     * Shows the Budget Versions tab; refreshes the final budget version and
     * the statuses of all versions beforehand.
     */
    public ActionForward versions(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        BudgetParentDocument parentDocument = budgetDocument.getBudget().getBudgetParent().getDocument();
        budgetForm.setFinalBudgetVersion(getFinalBudgetVersion(parentDocument.getBudgetDocumentVersions()));
        setBudgetStatuses(budgetDocument.getBudget().getBudgetParent());
        return mapping.findForward(Constants.BUDGET_VERSIONS_PAGE);
    }
    /**
     * Shows the Parameters (budget periods) tab; reconciles the budget status
     * and snapshots the current period start/end dates for change detection.
     */
    public ActionForward parameters(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        reconcileBudgetStatus((BudgetForm) form);
        BudgetDocument budgetDocument = ((BudgetForm)form).getBudgetDocument();
        getBudgetSummaryService().setupOldStartEndDate(budgetDocument.getBudget(),false);
        return mapping.findForward(Constants.BUDGET_PERIOD_PAGE);
    }
    /**
     * Shows the Personnel tab. Populates the hierarchy summary and personnel
     * category codes, syncs budget persons from the proposal when none exist
     * yet, reconciles role descriptions, refreshes lazily-loaded references
     * (budget person, rate classes) on every personnel/line-item calculated
     * amount, and exposes the "calculated salary" parameter to the form.
     */
    public ActionForward personnel(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        populatePersonnelHierarchySummary(budgetForm);
        populatePersonnelCategoryTypeCodes(budgetForm);
        if (budgetForm.getBudgetDocument().getBudget().getBudgetPersons().isEmpty()) {
            KcServiceLocator.getService(BudgetPersonService.class).synchBudgetPersonsToProposal(budgetForm.getBudgetDocument().getBudget());
        }
        reconcilePersonnelRoles(budgetForm.getBudgetDocument());
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        Budget budget = budgetDocument.getBudget();
        // Force ORM reference refresh so the JSP sees fully-populated objects.
        for(BudgetPeriod period : budget.getBudgetPeriods()) {
            for(BudgetLineItem lineItem : period.getBudgetLineItems()) {
                for(BudgetPersonnelDetails budgetPersonnelDetails : lineItem.getBudgetPersonnelDetailsList()) {
                    budgetPersonnelDetails.refreshReferenceObject("budgetPerson");
                    ObjectUtils.materializeObjects(budgetPersonnelDetails.getBudgetPersonnelCalculatedAmounts());
                    for(BudgetPersonnelCalculatedAmount budgetPersonnelCalculatedAmount:budgetPersonnelDetails.getBudgetPersonnelCalculatedAmounts()){
                        if(budgetPersonnelCalculatedAmount.getRateClass() == null) {
                            budgetPersonnelCalculatedAmount.refreshReferenceObject("rateClass");
                        }
                    }
                }
                for(BudgetLineItemCalculatedAmount lineItemCalculatedAmount:lineItem.getBudgetLineItemCalculatedAmounts()){
                    if(lineItemCalculatedAmount.getRateClass() == null) {
                        lineItemCalculatedAmount.refreshReferenceObject("rateClass");
                    }
                }
            }
        }
        ParameterService parameterService = KcServiceLocator.getService(ParameterService.class);
        String enableBudgetSalaryByPeriod = parameterService.getParameterValueAsString(ProposalDevelopmentDocument.class, Constants.ENABLE_BUDGET_CALCULATED_SALARY);
        budgetForm.setEnableBudgetSalaryByPeriod(enableBudgetSalaryByPeriod);
        return mapping.findForward(Constants.BUDGET_PERSONNEL_PAGE);
    }
    /**
     * For proposal budgets only: loads the hierarchy personnel summaries onto
     * the form and reconciles personnel roles.
     */
    protected void populatePersonnelHierarchySummary(BudgetForm budgetForm) {
        if (budgetForm.getBudgetDocument().getBudget().isProposalBudget()) {
            DevelopmentProposal parent = (DevelopmentProposal) budgetForm.getBudgetDocument().getBudget().getBudgetParent();
            String proposalNumber = parent.getProposalNumber();
            budgetForm.setHierarchyPersonnelSummaries(getHierarchyHelper().getHierarchyPersonnelSummaries(proposalNumber));
            for (HierarchyPersonnelSummary hierarchyPersonnelSummary : budgetForm.getHierarchyPersonnelSummaries()) {
                // NOTE(review): the loop variable `budget` is never used -- the
                // same form document is reconciled once per hierarchy budget.
                // Possibly each hierarchy budget's document was intended; confirm.
                for (Budget budget : hierarchyPersonnelSummary.getHierarchyBudgets()) {
                    reconcilePersonnelRoles(budgetForm.getBudgetDocument());
                }
            }
        }
    }
    // Reads the system parameter identifying the "personnel" budget category type.
    private String getPersonnelBudgetCategoryTypeCode() {
        return this.getParameterService().getParameterValueAsString(BudgetDocument.class, Constants.BUDGET_CATEGORY_TYPE_PERSONNEL);
    }
    /**
     * Restricts the budget's category type codes to the personnel category and
     * seeds one new-line-item placeholder on the form per matching category.
     * Mirror image of populateNonPersonnelCategoryTypeCodes().
     */
    protected void populatePersonnelCategoryTypeCodes(BudgetForm budgetForm) {
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        Budget budget = budgetDocument.getBudget();
        BudgetCategoryTypeValuesFinder budgetCategoryTypeValuesFinder = new BudgetCategoryTypeValuesFinder();
        List<KeyValue> budgetCategoryTypes = new ArrayList<KeyValue>();
        String personnelBudgetCategoryTypeCode = getPersonnelBudgetCategoryTypeCode();
        for(KeyValue budgetCategoryType: budgetCategoryTypeValuesFinder.getKeyValues()){
            String budgetCategoryTypeCode = (String) budgetCategoryType.getKey();
            if(StringUtils.isNotBlank(budgetCategoryTypeCode) && StringUtils.equalsIgnoreCase(budgetCategoryTypeCode, personnelBudgetCategoryTypeCode)) {
                budgetCategoryTypes.add(budgetCategoryType);
                BudgetLineItem newBudgetLineItem = budget.getNewBudgetLineItem();
                // Lazily create the list; the non-personnel variant assumes it exists.
                if (budgetForm.getNewBudgetLineItems() == null) {
                    budgetForm.setNewBudgetLineItems(new ArrayList<BudgetLineItem>());
                }
                budgetForm.getNewBudgetLineItems().add(newBudgetLineItem);
            }
        }
        budget.setBudgetCategoryTypeCodes(budgetCategoryTypes);
    }
    /**
     * Restricts the budget's category type codes to everything EXCEPT the
     * personnel category and seeds one new-line-item placeholder per category.
     * Near-duplicate of populatePersonnelCategoryTypeCodes() with the filter
     * inverted; unlike that method it does not null-guard
     * budgetForm.getNewBudgetLineItems() -- assumes the list exists.
     */
    protected void populateNonPersonnelCategoryTypeCodes(BudgetForm budgetForm) {
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        Budget budget = budgetDocument.getBudget();
        BudgetCategoryTypeValuesFinder budgetCategoryTypeValuesFinder = new BudgetCategoryTypeValuesFinder();
        List<KeyValue> budgetCategoryTypes = new ArrayList<KeyValue>();
        String personnelBudgetCategoryTypeCode = getPersonnelBudgetCategoryTypeCode();
        for(KeyValue budgetCategoryType: budgetCategoryTypeValuesFinder.getKeyValues()){
            String budgetCategoryTypeCode = (String) budgetCategoryType.getKey();
            if(StringUtils.isNotBlank(budgetCategoryTypeCode) && !StringUtils.equalsIgnoreCase(budgetCategoryTypeCode, personnelBudgetCategoryTypeCode)) {
                budgetCategoryTypes.add(budgetCategoryType);
                BudgetLineItem newBudgetLineItem = budget.getNewBudgetLineItem();
                budgetForm.getNewBudgetLineItems().add(newBudgetLineItem);
            }
        }
        budget.setBudgetCategoryTypeCodes(budgetCategoryTypes);
    }
    /**
     * Shows the Expenses tab; loads non-personnel category codes and refreshes
     * the budget periods reference.
     */
    public ActionForward expenses(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        populateNonPersonnelCategoryTypeCodes(budgetForm);
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        Budget budget = budgetDocument.getBudget();
        budget.refreshReferenceObject("budgetPeriods");
        return mapping.findForward(Constants.BUDGET_EXPENSES_PAGE);
    }
    // Shows the Rates tab; no preparation needed.
    public ActionForward rates(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        return mapping.findForward(Constants.BUDGET_RATES_PAGE);
    }
    /**
     * Shows the Distribution & Income tab after initializing default
     * distribution collections on the budget.
     */
    public ActionForward distributionAndIncome(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetDistributionService budgetDistributionService = KcServiceLocator.getService(BudgetDistributionService.class);
        budgetDistributionService.initializeCollectionDefaults(((BudgetForm) form).getBudgetDocument().getBudget());
        return mapping.findForward(Constants.BUDGET_DIST_AND_INCOME_PAGE);
    }
    /**
     * Shows the Modular Budget tab after generating the modular summary onto
     * the form.
     */
    public ActionForward modularBudget(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        BudgetModularService budgetModularService = KcServiceLocator.getService(BudgetModularService.class);
        budgetForm.setBudgetModularSummary(budgetModularService.generateModularSummary(budgetForm.getBudgetDocument().getBudget()));
        return mapping.findForward(Constants.BUDGET_MODULAR_PAGE);
    }
protected void populatePersonnelRoles(BudgetDocument budgetDocument) {
BudgetParent budgetParent = budgetDocument.getBudget().getBudgetParent().getDocument().getBudgetParent();
List<BudgetPerson> budgetPersons = budgetDocument.getBudget().getBudgetPersons();
for (BudgetPerson budgetPerson: budgetPersons) {
String roleDesc = "";
if (budgetPerson.getRolodexId() != null) {
PersonRolodex person = budgetParent.getProposalNonEmployee(budgetPerson.getRolodexId());
ContactRole role = budgetParent.getProposalNonEmployeeRole(budgetPerson.getRolodexId());
if (role != null) {
roleDesc = person.getInvestigatorRoleDescription();
if(person != null && StringUtils.equals(Constants.KEY_PERSON_ROLE, role.getRoleCode()) && StringUtils.isNotEmpty(person.getProjectRole())) {
roleDesc = person.getProjectRole();
}
}
} else if (budgetPerson.getPersonId() != null) {
PersonRolodex person = budgetParent.getProposalEmployee(budgetPerson.getPersonId());
ContactRole role = budgetParent.getProposalEmployeeRole(budgetPerson.getPersonId());
if (role != null) {
roleDesc = person.getInvestigatorRoleDescription();
if(person != null && StringUtils.equals(Constants.KEY_PERSON_ROLE, role.getRoleCode()) && StringUtils.isNotEmpty(person.getProjectRole())) {
roleDesc = person.getProjectRole();
}
}
}
budgetPerson.setRole(roleDesc);
}
}
    /**
     * Shows the Summary Totals tab: populates personnel roles, refreshes the
     * budgetPerson reference on every personnel detail, forces total
     * computation, and exposes the hierarchy indirect object code parameter.
     */
    public ActionForward summaryTotals(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        populatePersonnelRoles(budgetDocument);
        Budget budget = budgetDocument.getBudget();
        for(BudgetPeriod period : budget.getBudgetPeriods()) {
            for(BudgetLineItem lineItem : period.getBudgetLineItems()) {
                for(BudgetPersonnelDetails budgetPersonnelDetails : lineItem.getBudgetPersonnelDetailsList()) {
                    budgetPersonnelDetails.refreshReferenceObject("budgetPerson");
                }
            }
        }
        // Invoked for its side effect of computing totals before rendering.
        budget.getBudgetTotals();
        budgetForm.setProposalHierarchyIndirectObjectCode(getParameterService().getParameterValueAsString(BudgetDocument.class, "proposalHierarchySubProjectIndirectCostElement"));
        return mapping.findForward(Constants.BUDGET_SUMMARY_TOTALS_PAGE);
    }
    // Shows the proposal hierarchy page; no preparation needed.
    public ActionForward proposalHierarchy(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        return mapping.findForward(Constants.PROPOSAL_HIERARCHY_PAGE);
    }
    /**
     * Shows the Hierarchy tab after loading hierarchy proposal summaries for
     * the parent development proposal.
     */
    public ActionForward hierarchy(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm)form;
        DevelopmentProposal pd = (DevelopmentProposal) budgetForm.getBudgetDocument().getBudget().getBudgetParent();
        budgetForm.setHierarchyProposalSummaries(getHierarchyHelper().getHierarchyProposalSummaries(pd.getProposalNumber()));
        return mapping.findForward(Constants.HIERARCHY_PAGE);
    }
    /**
     * Shows the Budget Actions tab; ensures print forms are populated and
     * prepares budget sub-awards.
     */
    public ActionForward budgetActions(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) {
        BudgetForm budgetForm = (BudgetForm) form;
        BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
        Budget budget = budgetDocument.getBudget();
        populateBudgetPrintForms(budget);
        KcServiceLocator.getService(PropDevBudgetSubAwardService.class).prepareBudgetSubAwards(budget);
        return mapping.findForward(Constants.BUDGET_ACTIONS_PAGE);
    }
protected ProposalHierarcyActionHelper getHierarchyHelper() {
if (hierarchyHelper == null) {
hierarchyHelper = new ProposalHierarcyActionHelper();
}
return hierarchyHelper;
}
private void populateBudgetPrintForms(Budget budget) {
if(budget.getBudgetPrintForms().isEmpty()){
BudgetPrintService budgetPrintService = KcServiceLocator.getService(BudgetPrintService.class);
budgetPrintService.populateBudgetPrintForms(budget);
}
}
    /**
     * Saves the budget (unless the document is view-only) and returns the user
     * to the parent proposal development document. A pending confirmation
     * question forward takes precedence over the redirect.
     */
    public ActionForward returnToProposal(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
        final BudgetForm budgetForm = (BudgetForm) form;
        ActionForward forward = null;
        if (!StringUtils.equalsIgnoreCase((String)budgetForm.getEditingMode().get(AuthorizationConstants.EditMode.VIEW_ONLY), "TRUE")) {
            forward = this.save(mapping, form, request, response);
        }
        setupDocumentExit();
        if (forward == null || !forward.getPath().contains(KRADConstants.QUESTION_ACTION)) {
            return this.getReturnToProposalForward(budgetForm);
        }
        return forward;
    }
public ActionForward returnToAward(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
final BudgetForm budgetForm = (BudgetForm) form;
ActionForward forward = null;
if (!"true".equals(budgetForm.getEditingMode().get(AuthorizationConstants.EditMode.VIEW_ONLY))) {
forward = this.save(mapping, form, request, response);
}
setupDocumentExit();
if (forward == null || !forward.getPath().contains(KRADConstants.QUESTION_ACTION)) {
return this.getReturnToAwardForward(budgetForm);
}
return forward;
}
    /**
     * Builds the redirect back to the newest non-canceled version of the
     * parent award document. When an audit is activated the URL targets the
     * award Actions page instead, and the showAllBudgetVersions flag is
     * propagated so it persists while the user stays on the document.
     */
    private ActionForward getReturnToAwardForward(BudgetForm budgetForm) throws Exception{
        assert budgetForm != null : "the form is null";
        final DocumentService docService = KcServiceLocator.getService(DocumentService.class);
        Award award = (Award) budgetForm.getBudgetDocument().getBudget().getBudgetParent();
        //find the newest, uncanceled award document to return to
        String docNumber = award.getAwardDocument().getDocumentNumber();
        List<VersionHistory> versions = KcServiceLocator.getService(VersionHistoryService.class).loadVersionHistory(Award.class, award.getAwardNumber());
        for (VersionHistory version : versions) {
            if (version.getSequenceOwnerSequenceNumber() > award.getSequenceNumber() &&
                    version.getStatus() != VersionStatus.CANCELED) {
                docNumber = ((Award) version.getSequenceOwner()).getAwardDocument().getDocumentNumber();
            }
        }
        final AwardDocument awardDocument = (AwardDocument) docService.getByDocumentHeaderId(docNumber);
        String forwardUrl = buildForwardUrl(awardDocument.getDocumentHeader().getWorkflowDocument().getDocumentId());
        if(budgetForm.isAuditActivated()) {
            forwardUrl = StringUtils.replace(forwardUrl, "Award.do?", "Actions.do?");
        }
        //add showAllBudgetVersion to the url to persist that flag until they leave the document
        forwardUrl = StringUtils.replace(forwardUrl, ".do?", ".do?showAllBudgetVersions=" + budgetForm.isShowAllBudgetVersions() + "&");
        return new ActionForward(forwardUrl, true);
    }
    /**
     * Builds the redirect back to the parent proposal development document.
     * When an audit is activated the URL targets the proposal Actions page
     * with auditActivated=true; a reload is always forced via
     * methodToCallAttribute.
     *
     * @param form the budget form
     * @return the action forward
     * @throws WorkflowException if there is a problem interacting with workflow
     */
    private ActionForward getReturnToProposalForward(final BudgetForm form) throws WorkflowException {
        assert form != null : "the form is null";
        final DocumentService docService = KcServiceLocator.getService(DocumentService.class);
        final String docNumber = form.getBudgetDocument().getBudget().getBudgetParent().getDocument().getDocumentNumber();
        final ProposalDevelopmentDocument pdDoc = (ProposalDevelopmentDocument) docService.getByDocumentHeaderId(docNumber);
        String forwardUrl = buildForwardUrl(pdDoc.getDocumentHeader().getWorkflowDocument().getDocumentId());
        if(form.isAuditActivated()) {
            forwardUrl = StringUtils.replace(forwardUrl, "Proposal.do?", "Actions.do?auditActivated=true&");
        }
        forwardUrl += "&methodToCallAttribute=methodToCall.reload";
        return new ActionForward(forwardUrl, true);
    }
public void reconcilePersonnelRoles(BudgetDocument budgetDocument) {
// Populate the person's proposal roles, if they exist
Budget budget = budgetDocument.getBudget();
BudgetParent budgetParent = budget.getBudgetParent();
List<BudgetPerson> budgetPersons = budget.getBudgetPersons();
for (BudgetPerson budgetPerson: budgetPersons) {
if (budgetPerson.getRolodexId() != null) {
PersonRolodex person = budgetParent.getProposalNonEmployee(budgetPerson.getRolodexId());
if (person != null) { budgetPerson.setRole(person.getInvestigatorRoleDescription()); }
} else if (budgetPerson.getPersonId() != null) {
PersonRolodex person = budgetParent.getProposalEmployee(budgetPerson.getPersonId());
if (person != null) { budgetPerson.setRole(person.getInvestigatorRoleDescription()); }
}
}
}
protected void reconcileBudgetStatus(BudgetForm budgetForm) {
BudgetDocument budgetDocument = budgetForm.getBudgetDocument();
Budget budget = budgetDocument.getBudget();
BudgetParent budgetParent = budgetDocument.getBudget().getBudgetParent().getDocument().getBudgetParent();
if (budgetParent instanceof DevelopmentProposal) {
DevelopmentProposal proposal = (DevelopmentProposal)budgetParent;
KcServiceLocator.getService(ProposalBudgetStatusService.class).loadBudgetStatus(proposal);
}
if (budget.getFinalVersionFlag() != null && Boolean.TRUE.equals(budget.getFinalVersionFlag())) {
budget.setBudgetStatus(budgetParent.getBudgetStatus());
} else {
String budgetStatusIncompleteCode = this.getParameterService().getParameterValueAsString(
BudgetDocument.class, Constants.BUDGET_STATUS_INCOMPLETE_CODE);
budget.setBudgetStatus(budgetStatusIncompleteCode);
}
}
/**
*
* Handy method to stream the byte array to response object
* @param attachmentDataSource
* @param response
* @throws Exception
*/
public void streamToResponse(AttachmentDataSource attachmentDataSource,HttpServletResponse response) throws Exception{
byte[] xbts = attachmentDataSource.getData();
ByteArrayOutputStream baos = null;
if(xbts!=null)
try{
baos = new ByteArrayOutputStream(xbts.length);
baos.write(xbts);
WebUtils.saveMimeOutputStreamAsFile(response, attachmentDataSource.getType(), baos, attachmentDataSource.getName());
}finally{
try{
if(baos!=null){
baos.close();
baos = null;
}
}catch(IOException ioEx){
LOG.warn(ioEx.getMessage(), ioEx);
}
}
}
private void copyLineItemToPersonnelDetails(BudgetDocument budgetDocument) {
for (BudgetPeriod budgetPeriod : budgetDocument.getBudget().getBudgetPeriods()) {
if (budgetPeriod.getBudgetLineItems() != null && !budgetPeriod.getBudgetLineItems().isEmpty()) {
for (BudgetLineItem budgetLineItem : budgetPeriod.getBudgetLineItems()) {
if (budgetLineItem.getBudgetPersonnelDetailsList() != null && !budgetLineItem.getBudgetPersonnelDetailsList().isEmpty()) {
for (BudgetPersonnelDetails budgetPersonnelDetails : budgetLineItem.getBudgetPersonnelDetailsList()) {
budgetPersonnelDetails.setBudgetId(budgetLineItem.getBudgetId());
budgetPersonnelDetails.setBudgetPeriod(budgetLineItem.getBudgetPeriod());
budgetPersonnelDetails.setLineItemNumber(budgetLineItem.getLineItemNumber());
budgetPersonnelDetails.setCostElement(budgetLineItem.getCostElement());
budgetPersonnelDetails.setCostElementBO(budgetLineItem.getCostElementBO());
}
}
}
}
}
}
    /**
     * Uses the budget-specific lock service so budget documents get their own
     * pessimistic-locking behavior rather than the generic document locking.
     */
    @Override
    protected PessimisticLockService getPessimisticLockService() {
        return KcServiceLocator.getService(BudgetLockService.class);
    }
    /**
     * Rejects the budget document after a two-step confirmation dialog.
     * <p>
     * Flow: first invocation (no question answered yet) shows a confirmation
     * question with a reason input; answering "No" returns to the document;
     * answering "Yes" without a reason re-displays the question with a
     * missing-reason error; answering "Yes" with a reason marks the document
     * rejected, invokes the rejection service, and returns the user to the
     * sender.
     *
     * @param mapping the Struts action mapping
     * @param form the budget form
     * @param request the current request (carries the question state)
     * @param response the current response
     * @return the forward for the current step of the dialog
     * @throws Exception if the rejection or question handling fails
     */
    public ActionForward reject(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception {
        KualiDocumentFormBase kualiDocumentFormBase = (KualiDocumentFormBase) form;
        // Question state round-trips through request parameters.
        Object question = request.getParameter(KRADConstants.QUESTION_INST_ATTRIBUTE_NAME);
        Object buttonClicked = request.getParameter(KRADConstants.QUESTION_CLICKED_BUTTON);
        String reason = request.getParameter(KRADConstants.QUESTION_REASON_ATTRIBUTE_NAME);
        String methodToCall = ((KualiForm) form).getMethodToCall();
        final String questionText = "Are you sure you want to reject this document?";
        ActionForward forward;
        if (question == null) {
            // First pass: no question asked yet -- show the confirmation with
            // a reason input box.
            forward = this.performQuestionWithInput(mapping, form, request, response, DOCUMENT_REJECT_QUESTION,
                    questionText , KRADConstants.CONFIRMATION_QUESTION, methodToCall, "");
        } else if ((DOCUMENT_REJECT_QUESTION.equals(question)) && ConfirmationQuestion.NO.equals(buttonClicked)) {
            // User declined: back to the document unchanged.
            forward = mapping.findForward(Constants.MAPPING_BASIC);
        } else {
            if (StringUtils.isEmpty(reason)) {
                // User confirmed but gave no reason: re-ask with an error.
                String context = "";
                String errorKey = KeyConstants.ERROR_BUDGET_REJECT_NO_REASON;
                String errorPropertyName = DOCUMENT_REJECT_QUESTION;
                String errorParameter = "";
                reason = reason == null ? "" : reason;
                forward = this.performQuestionWithInputAgainBecauseOfErrors(mapping, form, request, response, DOCUMENT_REJECT_QUESTION,
                        questionText, KRADConstants.CONFIRMATION_QUESTION, methodToCall, context, reason, errorKey, errorPropertyName,
                        errorParameter);
            } else {
                //reject the document using the service.
                BudgetDocument document = ((BudgetForm)form).getBudgetDocument();
                document.documentHasBeenRejected(reason);
                KcServiceLocator.getService(KcDocumentRejectionService.class).reject(document.getDocumentNumber(), reason,
                        GlobalVariables.getUserSession().getPrincipalId());
                //tell the document it is being rejected and returned to the initial node.
                forward = super.returnToSender(request, mapping, kualiDocumentFormBase);
            }
        }
        return forward;
    }
    /**
     * Returns the budget common service appropriate for the given parent
     * (e.g. proposal vs. award), as chosen by the factory.
     *
     * @param budgetParent the budget's parent used to select the service
     * @return the matching {@link BudgetCommonService} instance
     */
    protected BudgetCommonService<BudgetParent> getBudgetCommonService(BudgetParent budgetParent) {
        return BudgetCommonServiceFactory.createInstance(budgetParent);
    }
    /**
     * This method is to recalculate the budget period via the parent-specific
     * budget common service.
     *
     * @param budgetForm the current budget form
     *        (NOTE(review): unused by this implementation -- possibly kept for
     *        subclass overrides; TODO confirm before removing)
     * @param budget the budget being recalculated
     * @param budgetPeriod the period to recalculate
     */
    protected void recalculateBudgetPeriod(BudgetForm budgetForm, Budget budget, BudgetPeriod budgetPeriod) {
        getBudgetCommonService(budget.getBudgetParent()).recalculateBudgetPeriod(budget, budgetPeriod);
    }
    /**
     * Delegates period-level calculation to the budget calculation service.
     *
     * @param budget the budget that owns the period
     * @param budgetPeriod the period to calculate
     */
    protected void calculateBudgetPeriod(Budget budget, BudgetPeriod budgetPeriod) {
        getCalculationService().calculateBudgetPeriod(budget, budgetPeriod);
    }
    /**
     * Locates the {@link BudgetCalculationService}
     *
     * @return {@link BudgetCalculationService} singleton instance
     */
    protected BudgetCalculationService getCalculationService() {
        return KcServiceLocator.getService(BudgetCalculationService.class);
    }
}
| blackcathacker/kc.preclean | coeus-code/src/main/java/org/kuali/coeus/common/budget/framework/core/BudgetAction.java | Java | apache-2.0 | 38,863 |
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.analytics.model.pnl;
import java.util.Collections;
import java.util.Set;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.function.FunctionCompilationContext;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ValuePropertyNames;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueRequirementNames;
import com.opengamma.financial.analytics.OpenGammaFunctionExclusions;
import com.opengamma.financial.property.DefaultPropertyFunction;
import com.opengamma.financial.security.FinancialSecurity;
import com.opengamma.financial.security.option.FXBarrierOptionSecurity;
import com.opengamma.financial.security.option.FXDigitalOptionSecurity;
import com.opengamma.financial.security.option.FXOptionSecurity;
import com.opengamma.financial.security.option.NonDeliverableFXDigitalOptionSecurity;
import com.opengamma.financial.security.option.NonDeliverableFXOptionSecurity;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
/**
 * Supplies default sampling-period, schedule-calculator and sampling-function
 * property values for P&L series produced for positions in FX options
 * (vanilla, barrier, digital, and their non-deliverable variants).
 */
public class FXOptionBlackPnLDefaults extends DefaultPropertyFunction {
  /** Default value for the sampling-period property. */
  private final String _samplingPeriod;
  /** Default value for the schedule-calculator property. */
  private final String _scheduleCalculator;
  /** Default value for the sampling-function property. */
  private final String _samplingFunction;

  public FXOptionBlackPnLDefaults(final String samplingPeriod, final String scheduleCalculator, final String samplingFunction) {
    super(ComputationTargetType.POSITION, true);
    ArgumentChecker.notNull(samplingPeriod, "sampling period");
    ArgumentChecker.notNull(scheduleCalculator, "schedule calculator");
    ArgumentChecker.notNull(samplingFunction, "sampling function");
    _samplingPeriod = samplingPeriod;
    _scheduleCalculator = scheduleCalculator;
    _samplingFunction = samplingFunction;
  }

  @Override
  public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
    // instanceof is null-safe, so an unresolved security simply fails the check.
    final Object security = target.getPosition().getSecurity();
    if (!(security instanceof FinancialSecurity)) {
      return false;
    }
    return security instanceof FXOptionSecurity
        || security instanceof FXBarrierOptionSecurity
        || security instanceof FXDigitalOptionSecurity
        || security instanceof NonDeliverableFXOptionSecurity
        || security instanceof NonDeliverableFXDigitalOptionSecurity;
  }

  @Override
  protected void getDefaults(final PropertyDefaults defaults) {
    // All three defaults apply to the same value requirement.
    for (final String property : new String[] {ValuePropertyNames.SAMPLING_PERIOD, ValuePropertyNames.SCHEDULE_CALCULATOR, ValuePropertyNames.SAMPLING_FUNCTION}) {
      defaults.addValuePropertyName(ValueRequirementNames.PNL_SERIES, property);
    }
  }

  @Override
  protected Set<String> getDefaultValue(final FunctionCompilationContext context, final ComputationTarget target, final ValueRequirement desiredValue,
      final String propertyName) {
    if (ValuePropertyNames.SAMPLING_PERIOD.equals(propertyName)) {
      return Collections.singleton(_samplingPeriod);
    } else if (ValuePropertyNames.SCHEDULE_CALCULATOR.equals(propertyName)) {
      return Collections.singleton(_scheduleCalculator);
    } else if (ValuePropertyNames.SAMPLING_FUNCTION.equals(propertyName)) {
      return Collections.singleton(_samplingFunction);
    }
    // Unknown property: no default supplied.
    return null;
  }

  @Override
  public String getMutualExclusionGroup() {
    return OpenGammaFunctionExclusions.PNL_SERIES;
  }

}
| McLeodMoores/starling | projects/financial/src/main/java/com/opengamma/financial/analytics/model/pnl/FXOptionBlackPnLDefaults.java | Java | apache-2.0 | 3,692 |
// StratusLab UI module, exposed on the jQuery namespace via an IIFE.
$.STRATUS = (function () {

    // Workaround to log the user out. HTTP basic auth is state-less, so
    // there is no cross-browser clean way to do it; requesting the current
    // page with bogus credentials forces the browser to drop the cached ones.
    function logout(event) {
        event.preventDefault();
        $.get(location.href.replace('://', '://x-pdisk-logout@'));
    }

    // Wire up the logout link.
    function init() {
        $('#logout').click(logout);
    }

    return { init: init };
}());

$(document).ready($.STRATUS.init);
| StratusLab/storage | pdisk-server/war/src/main/webapp/media/js/stratuslab.js | JavaScript | apache-2.0 | 387 |
package com.mkhuda.offlinecache;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import com.android.volley.Response;
import com.android.volley.VolleyError;
import com.android.volley.Request.Method;
import com.android.volley.toolbox.NetworkImageView;
import com.android.volley.toolbox.StringRequest;
import com.mkhuda.offlinecache.models.CacheModels;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
import android.widget.Toast;
import java.util.UUID;
/**
 * Demo screen for the offline-cache mechanism: one button fetches a random
 * movie from the server (caching the raw JSON response), the other re-reads
 * the cached JSON without touching the network.
 */
public class MainActivity extends Activity {

    private static final String TAG = MainActivity.class.getSimpleName();

    // Kept package-private to preserve the original field visibility.
    TextView t1, t2;
    Button b0, b;
    CacheModels cm;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        cm = new CacheModels(getApplicationContext());

        b0 = (Button) findViewById(R.id.button0);
        b0.setOnClickListener(new View.OnClickListener() {

            @Override
            public void onClick(View v) {
                loadfromserver();
            }
        });

        b = (Button) findViewById(R.id.button);
        b.setOnClickListener(new View.OnClickListener() {

            @Override
            public void onClick(View v) {
                showCachedMovie();
            }
        });
    }

    /**
     * Reads the cached JSON response from internal storage (key "John1"),
     * shows the movie name in the second text view, and toasts the value
     * held by {@link CacheModels}. Errors are logged instead of silently
     * printed to stderr.
     */
    private void showCachedMovie() {
        t2 = (TextView) findViewById(R.id.text2);
        try {
            String cached = (String) InternalStorage.readObject(getApplicationContext(), "John1");
            JSONObject jObj = new JSONObject(cached);
            t2.setText(jObj.getString("movie_name"));
        } catch (ClassNotFoundException | IOException e) {
            Log.e(TAG, "Unable to read cached response from internal storage", e);
        } catch (JSONException e) {
            Log.e(TAG, "Cached response is not valid JSON", e);
        }
        try {
            Toast.makeText(getApplicationContext(), cm.getCache1(), Toast.LENGTH_LONG).show();
        } catch (ClassNotFoundException | IOException e) {
            Log.e(TAG, "Unable to read cache via CacheModels", e);
        }
    }

    /**
     * Fetches a random movie from the server with Volley, stores the raw JSON
     * response in the cache, and toasts the saved movie name. Shows a
     * connectivity hint on failure.
     */
    private void loadfromserver() {
        t1 = (TextView) findViewById(R.id.text1);
        StringRequest movieReq = new StringRequest(Method.POST,
                "http://labs.mkhuda.com/bisanonton/movie-random.php", new Response.Listener<String>() {

                    @Override
                    public void onResponse(String response) {
                        Log.d(TAG, response.toString());
                        // Cache the raw response so it can be replayed offline.
                        cm.setCache1(response.toString());
                        t1.setText("Data Loaded");
                        try {
                            JSONObject jObj = new JSONObject(response);
                            Toast.makeText(getApplicationContext(), "String saved is: " + jObj.getString("movie_name"), Toast.LENGTH_LONG).show();
                        } catch (JSONException e) {
                            Log.e(TAG, "Server response is not valid JSON", e);
                        }
                    }
                }, new Response.ErrorListener() {

                    @Override
                    public void onErrorResponse(VolleyError error) {
                        Toast.makeText(getApplicationContext(),
                                "Please Connect To Internet!", Toast.LENGTH_SHORT).show();
                    }
                });
        AppController.getInstance().addToRequestQueue(movieReq);
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // The action bar handles Home/Up automatically when a parent activity
        // is declared in AndroidManifest.xml.
        if (item.getItemId() == R.id.action_settings) {
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
}
| mkhuda/AndOfflineMechanism | src/com/mkhuda/offlinecache/MainActivity.java | Java | apache-2.0 | 4,007 |
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/ads/googleads/v9/services/conversion_value_rule_set_service.proto
package com.google.ads.googleads.v9.services;
/**
* <pre>
* The result for the conversion value rule set mutate.
* </pre>
*
* Protobuf type {@code google.ads.googleads.v9.services.MutateConversionValueRuleSetResult}
*/
public final class MutateConversionValueRuleSetResult extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
MutateConversionValueRuleSetResultOrBuilder {
private static final long serialVersionUID = 0L;
// Use MutateConversionValueRuleSetResult.newBuilder() to construct.
private MutateConversionValueRuleSetResult(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private MutateConversionValueRuleSetResult() {
resourceName_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new MutateConversionValueRuleSetResult();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MutateConversionValueRuleSetResult(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
java.lang.String s = input.readStringRequireUtf8();
resourceName_ = s;
break;
}
case 18: {
com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder subBuilder = null;
if (conversionValueRuleSet_ != null) {
subBuilder = conversionValueRuleSet_.toBuilder();
}
conversionValueRuleSet_ = input.readMessage(com.google.ads.googleads.v9.resources.ConversionValueRuleSet.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(conversionValueRuleSet_);
conversionValueRuleSet_ = subBuilder.buildPartial();
}
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.class, com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.Builder.class);
}
public static final int RESOURCE_NAME_FIELD_NUMBER = 1;
private volatile java.lang.Object resourceName_;
/**
* <pre>
* Returned for successful operations.
* </pre>
*
* <code>string resource_name = 1;</code>
* @return The resourceName.
*/
@java.lang.Override
public java.lang.String getResourceName() {
java.lang.Object ref = resourceName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
resourceName_ = s;
return s;
}
}
/**
* <pre>
* Returned for successful operations.
* </pre>
*
* <code>string resource_name = 1;</code>
* @return The bytes for resourceName.
*/
@java.lang.Override
public com.google.protobuf.ByteString
getResourceNameBytes() {
java.lang.Object ref = resourceName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
resourceName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int CONVERSION_VALUE_RULE_SET_FIELD_NUMBER = 2;
private com.google.ads.googleads.v9.resources.ConversionValueRuleSet conversionValueRuleSet_;
/**
* <pre>
* The mutated conversion value rule set with only mutable fields after
* mutate. The field will only be returned when response_content_type is set
* to "MUTABLE_RESOURCE".
* </pre>
*
* <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
* @return Whether the conversionValueRuleSet field is set.
*/
@java.lang.Override
public boolean hasConversionValueRuleSet() {
return conversionValueRuleSet_ != null;
}
/**
* <pre>
* The mutated conversion value rule set with only mutable fields after
* mutate. The field will only be returned when response_content_type is set
* to "MUTABLE_RESOURCE".
* </pre>
*
* <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
* @return The conversionValueRuleSet.
*/
@java.lang.Override
public com.google.ads.googleads.v9.resources.ConversionValueRuleSet getConversionValueRuleSet() {
return conversionValueRuleSet_ == null ? com.google.ads.googleads.v9.resources.ConversionValueRuleSet.getDefaultInstance() : conversionValueRuleSet_;
}
/**
* <pre>
* The mutated conversion value rule set with only mutable fields after
* mutate. The field will only be returned when response_content_type is set
* to "MUTABLE_RESOURCE".
* </pre>
*
* <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
*/
@java.lang.Override
public com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder getConversionValueRuleSetOrBuilder() {
return getConversionValueRuleSet();
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(resourceName_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, resourceName_);
}
if (conversionValueRuleSet_ != null) {
output.writeMessage(2, getConversionValueRuleSet());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(resourceName_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, resourceName_);
}
if (conversionValueRuleSet_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, getConversionValueRuleSet());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)) {
return super.equals(obj);
}
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult other = (com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult) obj;
if (!getResourceName()
.equals(other.getResourceName())) return false;
if (hasConversionValueRuleSet() != other.hasConversionValueRuleSet()) return false;
if (hasConversionValueRuleSet()) {
if (!getConversionValueRuleSet()
.equals(other.getConversionValueRuleSet())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + RESOURCE_NAME_FIELD_NUMBER;
hash = (53 * hash) + getResourceName().hashCode();
if (hasConversionValueRuleSet()) {
hash = (37 * hash) + CONVERSION_VALUE_RULE_SET_FIELD_NUMBER;
hash = (53 * hash) + getConversionValueRuleSet().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
* The result for the conversion value rule set mutate.
* </pre>
*
* Protobuf type {@code google.ads.googleads.v9.services.MutateConversionValueRuleSetResult}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResultOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.class, com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.Builder.class);
}
// Construct using com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
resourceName_ = "";
if (conversionValueRuleSetBuilder_ == null) {
conversionValueRuleSet_ = null;
} else {
conversionValueRuleSet_ = null;
conversionValueRuleSetBuilder_ = null;
}
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return com.google.ads.googleads.v9.services.ConversionValueRuleSetServiceProto.internal_static_google_ads_googleads_v9_services_MutateConversionValueRuleSetResult_descriptor;
}
@java.lang.Override
public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult getDefaultInstanceForType() {
return com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.getDefaultInstance();
}
@java.lang.Override
public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult build() {
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult buildPartial() {
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult result = new com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult(this);
result.resourceName_ = resourceName_;
if (conversionValueRuleSetBuilder_ == null) {
result.conversionValueRuleSet_ = conversionValueRuleSet_;
} else {
result.conversionValueRuleSet_ = conversionValueRuleSetBuilder_.build();
}
onBuilt();
return result;
}
// The overrides below are generated boilerplate that delegates field
// manipulation to the GeneratedMessageV3.Builder superclass.
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
// Use the typed merge when the runtime type matches; otherwise fall back to
// the superclass's reflection-based merge.
if (other instanceof com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult) {
return mergeFrom((com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-wise merge from another message of the same type; no-op when 'other'
// is the default instance.
public Builder mergeFrom(com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult other) {
if (other == com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult.getDefaultInstance()) return this;
if (!other.getResourceName().isEmpty()) {
resourceName_ = other.resourceName_;
onChanged();
}
if (other.hasConversionValueRuleSet()) {
mergeConversionValueRuleSet(other.getConversionValueRuleSet());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
// No required fields, so the message is always considered initialized.
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Keep whatever was parsed before the failure, then rethrow as IOException.
parsedMessage = (com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
// Merge any successfully parsed prefix even on failure.
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Backing store for field 1; holds either the raw ByteString from parsing or
// the decoded String (cached on first access).
private java.lang.Object resourceName_ = "";
/**
 * <pre>
 * Returned for successful operations.
 * </pre>
 *
 * <code>string resource_name = 1;</code>
 * @return The resourceName.
 */
public java.lang.String getResourceName() {
java.lang.Object ref = resourceName_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded string so subsequent calls return it directly.
resourceName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * <pre>
 * Returned for successful operations.
 * </pre>
 *
 * <code>string resource_name = 1;</code>
 * @return The bytes for resourceName.
 */
public com.google.protobuf.ByteString
getResourceNameBytes() {
java.lang.Object ref = resourceName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Cache the encoded bytes, mirroring getResourceName()'s caching.
resourceName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
 * <pre>
 * Returned for successful operations.
 * </pre>
 *
 * <code>string resource_name = 1;</code>
 * @param value The resourceName to set.
 * @return This builder for chaining.
 */
public Builder setResourceName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
resourceName_ = value;
onChanged();
return this;
}
/**
 * <pre>
 * Returned for successful operations.
 * </pre>
 *
 * <code>string resource_name = 1;</code>
 * @return This builder for chaining.
 */
public Builder clearResourceName() {
resourceName_ = getDefaultInstance().getResourceName();
onChanged();
return this;
}
/**
 * <pre>
 * Returned for successful operations.
 * </pre>
 *
 * <code>string resource_name = 1;</code>
 * @param value The bytes for resourceName to set.
 * @return This builder for chaining.
 */
public Builder setResourceNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
// proto3 strings must be valid UTF-8; reject invalid bytes up front.
checkByteStringIsUtf8(value);
resourceName_ = value;
onChanged();
return this;
}
// Backing store for message field 2. The value is held directly until a nested
// builder is requested, after which conversionValueRuleSetBuilder_ owns it.
private com.google.ads.googleads.v9.resources.ConversionValueRuleSet conversionValueRuleSet_;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.ads.googleads.v9.resources.ConversionValueRuleSet, com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder, com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder> conversionValueRuleSetBuilder_;
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 * @return Whether the conversionValueRuleSet field is set.
 */
public boolean hasConversionValueRuleSet() {
return conversionValueRuleSetBuilder_ != null || conversionValueRuleSet_ != null;
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 * @return The conversionValueRuleSet.
 */
public com.google.ads.googleads.v9.resources.ConversionValueRuleSet getConversionValueRuleSet() {
if (conversionValueRuleSetBuilder_ == null) {
return conversionValueRuleSet_ == null ? com.google.ads.googleads.v9.resources.ConversionValueRuleSet.getDefaultInstance() : conversionValueRuleSet_;
} else {
return conversionValueRuleSetBuilder_.getMessage();
}
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 */
public Builder setConversionValueRuleSet(com.google.ads.googleads.v9.resources.ConversionValueRuleSet value) {
if (conversionValueRuleSetBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
conversionValueRuleSet_ = value;
onChanged();
} else {
conversionValueRuleSetBuilder_.setMessage(value);
}
return this;
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 */
public Builder setConversionValueRuleSet(
com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder builderForValue) {
if (conversionValueRuleSetBuilder_ == null) {
conversionValueRuleSet_ = builderForValue.build();
onChanged();
} else {
conversionValueRuleSetBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 */
public Builder mergeConversionValueRuleSet(com.google.ads.googleads.v9.resources.ConversionValueRuleSet value) {
if (conversionValueRuleSetBuilder_ == null) {
// Merge field-wise into any existing value rather than replacing it.
if (conversionValueRuleSet_ != null) {
conversionValueRuleSet_ =
com.google.ads.googleads.v9.resources.ConversionValueRuleSet.newBuilder(conversionValueRuleSet_).mergeFrom(value).buildPartial();
} else {
conversionValueRuleSet_ = value;
}
onChanged();
} else {
conversionValueRuleSetBuilder_.mergeFrom(value);
}
return this;
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 */
public Builder clearConversionValueRuleSet() {
if (conversionValueRuleSetBuilder_ == null) {
conversionValueRuleSet_ = null;
onChanged();
} else {
conversionValueRuleSet_ = null;
conversionValueRuleSetBuilder_ = null;
}
return this;
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 */
public com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder getConversionValueRuleSetBuilder() {
onChanged();
return getConversionValueRuleSetFieldBuilder().getBuilder();
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 */
public com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder getConversionValueRuleSetOrBuilder() {
if (conversionValueRuleSetBuilder_ != null) {
return conversionValueRuleSetBuilder_.getMessageOrBuilder();
} else {
return conversionValueRuleSet_ == null ?
com.google.ads.googleads.v9.resources.ConversionValueRuleSet.getDefaultInstance() : conversionValueRuleSet_;
}
}
/**
 * <pre>
 * The mutated conversion value rule set with only mutable fields after
 * mutate. The field will only be returned when response_content_type is set
 * to "MUTABLE_RESOURCE".
 * </pre>
 *
 * <code>.google.ads.googleads.v9.resources.ConversionValueRuleSet conversion_value_rule_set = 2;</code>
 */
private com.google.protobuf.SingleFieldBuilderV3<
com.google.ads.googleads.v9.resources.ConversionValueRuleSet, com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder, com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder>
getConversionValueRuleSetFieldBuilder() {
// Lazily creates the nested field builder; once created, it takes ownership
// of the current value and conversionValueRuleSet_ is cleared.
if (conversionValueRuleSetBuilder_ == null) {
conversionValueRuleSetBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
com.google.ads.googleads.v9.resources.ConversionValueRuleSet, com.google.ads.googleads.v9.resources.ConversionValueRuleSet.Builder, com.google.ads.googleads.v9.resources.ConversionValueRuleSetOrBuilder>(
getConversionValueRuleSet(),
getParentForChildren(),
isClean());
conversionValueRuleSet_ = null;
}
return conversionValueRuleSetBuilder_;
}
// Unknown-field handling is delegated entirely to the generated superclass.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
}
// @@protoc_insertion_point(class_scope:google.ads.googleads.v9.services.MutateConversionValueRuleSetResult)
// Process-wide singleton default (all-unset) instance.
private static final com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult();
}
public static com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Wire-format parser; delegates to the message's stream-parsing constructor.
private static final com.google.protobuf.Parser<MutateConversionValueRuleSetResult>
PARSER = new com.google.protobuf.AbstractParser<MutateConversionValueRuleSetResult>() {
@java.lang.Override
public MutateConversionValueRuleSetResult parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new MutateConversionValueRuleSetResult(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<MutateConversionValueRuleSetResult> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<MutateConversionValueRuleSetResult> getParserForType() {
return PARSER;
}
@java.lang.Override
public com.google.ads.googleads.v9.services.MutateConversionValueRuleSetResult getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
| googleads/google-ads-java | google-ads-stubs-v9/src/main/java/com/google/ads/googleads/v9/services/MutateConversionValueRuleSetResult.java | Java | apache-2.0 | 32,654 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace Apworks.Messaging
{
/// <summary>
/// Base implementation of <see cref="IMessageHandlerExecutionContext"/> that
/// forwards the generic registration helpers to the <see cref="Type"/>-based
/// abstract members implemented by derived contexts.
/// </summary>
public abstract class MessageHandlerExecutionContext : IMessageHandlerExecutionContext
{
    /// <summary>Registers <typeparamref name="THandler"/> as a handler for <typeparamref name="TMessage"/>.</summary>
    public void RegisterHandler<TMessage, THandler>()
        where TMessage : IMessage
        where THandler : IMessageHandler<TMessage>
    {
        RegisterHandler(typeof(TMessage), typeof(THandler));
    }

    /// <summary>Checks whether <typeparamref name="THandler"/> has been registered for <typeparamref name="TMessage"/>.</summary>
    public bool HandlerRegistered<TMessage, THandler>()
        where TMessage : IMessage
        where THandler : IMessageHandler<TMessage>
    {
        return HandlerRegistered(typeof(TMessage), typeof(THandler));
    }

    /// <summary>Registers a handler type for the given message type.</summary>
    public abstract void RegisterHandler(Type messageType, Type handlerType);

    /// <summary>Checks whether the handler type is registered for the given message type.</summary>
    public abstract bool HandlerRegistered(Type messageType, Type handlerType);

    /// <summary>Dispatches the message to its registered handlers.</summary>
    public abstract Task HandleMessageAsync(IMessage message, CancellationToken cancellationToken = default(CancellationToken));
}
}
| daxnet/apworks-core | src/Apworks/Messaging/MessageHandlerExecutionContext.cs | C# | apache-2.0 | 1,024 |
// Code generated by go-swagger; DO NOT EDIT.
package routes
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
"github.com/funcy/functions_go/models"
)
// DeleteAppsAppRoutesRouteReader is a Reader for the DeleteAppsAppRoutesRoute structure.
type DeleteAppsAppRoutesRouteReader struct {
formats strfmt.Registry // format registry passed through to payload consumption
}
// ReadResponse reads a server response into the received o.
// 200 yields a success result; 404 and other non-2xx statuses are returned as
// typed error values.
func (o *DeleteAppsAppRoutesRouteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDeleteAppsAppRoutesRouteOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 404:
result := NewDeleteAppsAppRoutesRouteNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
result := NewDeleteAppsAppRoutesRouteDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
// Unlisted 2xx codes count as success; everything else is an error.
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewDeleteAppsAppRoutesRouteOK creates a DeleteAppsAppRoutesRouteOK with default headers values
func NewDeleteAppsAppRoutesRouteOK() *DeleteAppsAppRoutesRouteOK {
return &DeleteAppsAppRoutesRouteOK{}
}
/*DeleteAppsAppRoutesRouteOK handles this case with default header values.
Route successfully deleted.
*/
type DeleteAppsAppRoutesRouteOK struct {
}
// Error describes the response; the 200 case carries no payload.
func (o *DeleteAppsAppRoutesRouteOK) Error() string {
return fmt.Sprintf("[DELETE /apps/{app}/routes/{route}][%d] deleteAppsAppRoutesRouteOK ", 200)
}
// readResponse is a no-op: a successful delete has an empty body.
func (o *DeleteAppsAppRoutesRouteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewDeleteAppsAppRoutesRouteNotFound creates a DeleteAppsAppRoutesRouteNotFound with default headers values
func NewDeleteAppsAppRoutesRouteNotFound() *DeleteAppsAppRoutesRouteNotFound {
return &DeleteAppsAppRoutesRouteNotFound{}
}
/*DeleteAppsAppRoutesRouteNotFound handles this case with default header values.
Route does not exist.
*/
type DeleteAppsAppRoutesRouteNotFound struct {
Payload *models.Error // decoded error body from the service
}
// Error implements the error interface for the 404 response.
func (o *DeleteAppsAppRoutesRouteNotFound) Error() string {
return fmt.Sprintf("[DELETE /apps/{app}/routes/{route}][%d] deleteAppsAppRoutesRouteNotFound %+v", 404, o.Payload)
}
// readResponse decodes the error payload; io.EOF means an empty body and is
// tolerated.
func (o *DeleteAppsAppRoutesRouteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Error)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDeleteAppsAppRoutesRouteDefault creates a DeleteAppsAppRoutesRouteDefault with default headers values
func NewDeleteAppsAppRoutesRouteDefault(code int) *DeleteAppsAppRoutesRouteDefault {
return &DeleteAppsAppRoutesRouteDefault{
_statusCode: code,
}
}
/*DeleteAppsAppRoutesRouteDefault handles this case with default header values.
Unexpected error
*/
type DeleteAppsAppRoutesRouteDefault struct {
_statusCode int // HTTP status the server actually returned
Payload *models.Error // decoded error body from the service
}
// Code gets the status code for the delete apps app routes route default response
func (o *DeleteAppsAppRoutesRouteDefault) Code() int {
return o._statusCode
}
// Error implements the error interface for the catch-all response.
func (o *DeleteAppsAppRoutesRouteDefault) Error() string {
return fmt.Sprintf("[DELETE /apps/{app}/routes/{route}][%d] DeleteAppsAppRoutesRoute default %+v", o._statusCode, o.Payload)
}
// readResponse decodes the error payload; io.EOF means an empty body and is
// tolerated.
func (o *DeleteAppsAppRoutesRouteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Error)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| funcy/functions_go | client/routes/delete_apps_app_routes_route_responses.go | GO | apache-2.0 | 4,056 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/sparse_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/fully_connected.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/sparse_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace fully_connected {
namespace {
// Returns true if the sparse kernels support this sparsity layout: dimension 0
// dense and dimension 1 in CSR form. Any other combination falls back to the
// dense path. (Idiom fix: collapse the if/return-bool pattern into a single
// boolean return.)
bool SupportedSparsityFormat(const TfLiteSparsity& sparsity) {
  return sparsity.dim_metadata[0].format == kTfLiteDimDense &&
         sparsity.dim_metadata[1].format == kTfLiteDimSparseCSR;
}
// Expected dim_metadata entry counts for the supported sparse tensor formats.
static const int kDimMetadataSizeRandomSparse = 2;
static const int kDimMetadataSizeBlockSparse = 3;
// Allocates the persistent uint8 "ledger" tensor used by the sparse hybrid
// kernel. The ledger holds, per block row, a count of non-zero blocks followed
// by their column indices, so its length is #indices + (#segments - 1).
TfLiteStatus CreateLedgerTensor(const TfLiteSparsity* sparsity,
                                TfLiteContext* context, TfLiteTensor* ledger) {
  TF_LITE_ENSURE(context, sparsity != nullptr);
  ledger->type = kTfLiteUInt8;
  ledger->allocation_type = kTfLiteArenaRwPersistent;
  const auto& csr_dim = sparsity->dim_metadata[1];
  TfLiteIntArray* ledger_shape = TfLiteIntArrayCreate(1);
  ledger_shape->data[0] =
      csr_dim.array_indices->size + csr_dim.array_segments->size - 1;
  return context->ResizeTensor(context, ledger, ledger_shape);
}
// Serializes the CSR metadata into 'ledger_data': for each block row, writes
// the number of non-zero blocks and then each block's column index. Returns
// kTfLiteError if any count or index exceeds UINT8_MAX.
TfLiteStatus PopulateLedgerData(const TfLiteSparsity* sparsity,
                                TfLiteContext* context, uint8_t* ledger_data) {
  TF_LITE_ENSURE(context, sparsity != nullptr);
  const auto* segments = sparsity->dim_metadata[1].array_segments;
  const auto* indices = sparsity->dim_metadata[1].array_indices;
  uint8_t* out = ledger_data;
  for (int row = 0; row < segments->size - 1; ++row) {
    const int begin = segments->data[row];
    const int end = segments->data[row + 1];
    if (end - begin > UINT8_MAX) {
      return kTfLiteError;
    }
    // Number of non-zero blocks in this row.
    *out++ = static_cast<uint8_t>(end - begin);
    for (int k = begin; k < end; ++k) {
      if (indices->data[k] > UINT8_MAX) {
        return kTfLiteError;
      }
      // Column index of each non-zero block in this row.
      *out++ = static_cast<uint8_t>(indices->data[k]);
    }
  }
  return kTfLiteOk;
}
} // namespace
// This file has multiple implementations of FullyConnected, selected via the
// KernelType template argument at op registration time.
enum KernelType {
kReference,
kGenericOptimized,
kLegacyPie, // Legacy path used by the PIE team and related clients.
};
// Per-node state computed in Prepare() and consumed in Eval().
struct OpData {
// The scaling factor from input to output (aka the 'real multiplier') can
// be represented as a fixed point multiplier plus a left shift.
int32_t output_multiplier;
int output_shift;
// The range of the fused activation layer. For example for kNone and
// uint8_t these would be 0 and 255.
int32_t output_activation_min;
int32_t output_activation_max;
// The index of the temporary tensor where the quantized inputs are cached.
int scratch_tensor_index;
// Set in Prepare() for the hybrid path; cleared once row sums are computed.
bool compute_row_sums = false;
// Only used for sparse hybrid fully connected kernels.
bool ledger_initialized;
};
// Tensor indices within the node's input and output lists.
constexpr int kInputTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kBiasTensor = 2;
constexpr int kOutputTensor = 0;
constexpr int kShuffledInputWorkspaceTensor = 1;
inline TfLiteStatus CheckTypes(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* output,
TfLiteFullyConnectedParams* params) {
const bool is_quantized =
((filter->type == kTfLiteUInt8) || (filter->type == kTfLiteInt8));
const bool is_hybrid = is_quantized && (input->type == kTfLiteFloat32);
const bool is_shuffled =
is_quantized && (params->weights_format ==
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8);
// optional bias tensor.
const bool is_optional_bias_float = !bias || (bias->type == kTfLiteFloat32);
const bool is_optional_bias_int =
!bias || (bias->type == kTfLiteInt32) || (bias->type == kTfLiteInt64);
if (is_quantized) {
if (is_shuffled) {
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt8);
TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteUInt8);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16);
TF_LITE_ENSURE_EQ(context, is_optional_bias_int, true);
} else if (is_hybrid) {
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, is_optional_bias_float, true);
} else {
TF_LITE_ENSURE(context, input->type == kTfLiteUInt8 ||
input->type == kTfLiteInt8 ||
input->type == kTfLiteInt16);
TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 ||
output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16);
TF_LITE_ENSURE_EQ(context, is_optional_bias_int, true);
}
} else {
// Only float32 is supported currently
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, is_optional_bias_float, true);
}
return kTfLiteOk;
}
// Allocates the per-node OpData and reserves the scratch tensor indices used
// by the hybrid path. For builtin ops the serialized 'buffer' is unused.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  OpData* op_data = new OpData();
  // Reserve six scratch slots: quantized input, scaling factors, accumulator
  // scratch, input offsets, row sums, and (for sparse filters) the ledger.
  context->AddTensors(context, /*tensors_to_add=*/6,
                      &op_data->scratch_tensor_index);
  return op_data;
}
// Releases the per-node state allocated in Init().
void Free(TfLiteContext* context, void* buffer) {
  // static_cast is the correct (and sufficient) cast for void* -> OpData*;
  // reinterpret_cast was unnecessarily strong here.
  delete static_cast<OpData*>(buffer);
}
// Shared preparation for all FullyConnected variants: validates tensor counts
// and dtypes, computes the quantized output multiplier for integer paths,
// allocates the temporaries needed by the hybrid (float-input,
// quantized-weight) path, and resizes the output tensor.
TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
// Check we have all the inputs and outputs we need.
TF_LITE_ENSURE(context, node->inputs->size == 2 || node->inputs->size == 3);
// Shuffled formats need a workspace to store the shuffled input activations.
const int expected_outputs_count =
params->weights_format == kTfLiteFullyConnectedWeightsFormatDefault ? 1
: 2;
TF_LITE_ENSURE_EQ(context, node->outputs->size, expected_outputs_count);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kWeightsTensor, &filter));
// Bias is optional; only present when the node has three inputs.
const TfLiteTensor* bias =
(node->inputs->size == 3)
? GetOptionalInputTensor(context, node, kBiasTensor)
: nullptr;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
// Check proper datatype match among all Input Tensors
TF_LITE_ENSURE_STATUS(
CheckTypes(context, input, filter, bias, output, params));
// Check all the parameters of tensor match within themselves and match the
// input configuration.
int input_size = 1;
for (int i = 0; i < input->dims->size; i++) {
input_size *= input->dims->data[i];
}
TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 2);
// Guard the division below against a zero-sized filter inner dimension.
TF_LITE_ENSURE(context, filter->dims->data[1] != 0);
const int batch_size = input_size / filter->dims->data[1];
const int num_units = filter->dims->data[0];
if (bias) {
TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0));
}
// Note that quantized inference requires that all tensors have their
// parameters set. This is usually done during quantized training.
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8 ||
input->type == kTfLiteInt16) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, input, filter, bias, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent);
data->output_shift = exponent;
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
}
// The int16 path requires symmetric quantization (zero points of 0).
if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
// If we have to perform on-the-fly quantization (with quantized weights and
// float inputs) first we need to quantize the inputs. Allocate a temporary
// buffer to store the intermediate quantized values.
// Additionally, we allocate a temporary buffer to store the accumulated
// quantized values prior to multiplication by the scaling factor.
const bool is_hybrid =
(input->type == kTfLiteFloat32 &&
(filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8));
const bool is_sparse = filter->sparsity != nullptr;
if (is_hybrid) {
TfLiteIntArrayFree(node->temporaries);
data->compute_row_sums = true;
if (is_sparse) {
node->temporaries = TfLiteIntArrayCreate(6);
} else {
node->temporaries = TfLiteIntArrayCreate(5);
}
// Temporary 0: quantized copy of the float input.
node->temporaries->data[0] = data->scratch_tensor_index;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/0,
&input_quantized));
input_quantized->type = filter->type;
input_quantized->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
// Temporary 1: per-batch scaling factors.
node->temporaries->data[1] = data->scratch_tensor_index + 1;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
// Temporary 2: int32 accumulator scratch (num_units x batch_size).
node->temporaries->data[2] = data->scratch_tensor_index + 2;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/2, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = num_units;
accum_size->data[1] = batch_size;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
// Temporary 3: per-batch input zero-point offsets.
node->temporaries->data[3] = data->scratch_tensor_index + 3;
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/3, &input_offsets));
input_offsets->type = kTfLiteInt32;
input_offsets->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) {
TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1);
input_offsets_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets,
input_offsets_size));
}
// Temporary 4: per-unit filter row sums (persistent across invocations).
node->temporaries->data[4] = data->scratch_tensor_index + 4;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/4, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_units};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
row_sums_size->data[0] = row_sums_dims[0];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
// Temporary 5 (sparse only): the filter ledger, populated lazily in Eval.
if (is_sparse) {
data->ledger_initialized = false;
node->temporaries->data[5] = data->scratch_tensor_index + 5;
TfLiteTensor* filter_ledger =
&context->tensors[node->temporaries->data[5]];
auto status =
CreateLedgerTensor(filter->sparsity, context, filter_ledger);
if (status != kTfLiteOk) return status;
}
}
// Resize output.
TfLiteIntArray* output_size_array = nullptr;
if (params->keep_num_dims) {
// When number of dimensions are kept the filter operates along the last
// dimensions. In other words, for an input tensor with shape
// [batch_size, ..., n_inputs] and a filter of shape [n_inputs, n_units]
// this Op produces an output of shape [batch_size, ..., n_units].
TF_LITE_ENSURE_EQ(context, input->dims->data[input->dims->size - 1],
SizeOfDimension(filter, 1));
output_size_array = TfLiteIntArrayCopy(input->dims);
output_size_array->data[output_size_array->size - 1] = num_units;
} else {
// Otherwise, the output is (potentially flattened to) a 2-D matrix.
output_size_array = TfLiteIntArrayCreate(2);
output_size_array->data[0] = batch_size;
output_size_array->data[1] = num_units;
}
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
return kTfLiteOk;
}
// Node-preparation entry point: restricts the fused activation to the types
// the selected kernel supports, then delegates to PrepareImpl.
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Check for supported activation types.
auto* params =
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kWeightsTensor, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const bool is_quantized =
((filter->type == kTfLiteUInt8) || (filter->type == kTfLiteInt8));
const bool is_hybrid = is_quantized && (input->type == kTfLiteFloat32);
const bool is_pie = kernel_type == kLegacyPie;
// Pie and hybrid path supports all kinds of fused activations, otherwise only
// clipping activations are supported.
if (!is_pie && !is_hybrid) {
TF_LITE_ENSURE(context, params->activation == kTfLiteActNone ||
params->activation == kTfLiteActRelu ||
params->activation == kTfLiteActReluN1To1 ||
params->activation == kTfLiteActRelu6);
}
return PrepareImpl(context, node);
}
// Float path for the legacy PIE kernel:
//   output = activation(filter * input + bias)
// computed with the portable tensor_utils helpers.
TfLiteStatus EvalPie(TfLiteContext* context, TfLiteNode* node,
                     TfLiteFullyConnectedParams* params, OpData* data,
                     const TfLiteTensor* input, const TfLiteTensor* filter,
                     const TfLiteTensor* bias, TfLiteTensor* output) {
  int flat_input_size = 1;
  for (int d = 0; d < input->dims->size; ++d) {
    flat_input_size *= input->dims->data[d];
  }
  const int input_depth = filter->dims->data[1];
  const int batches = flat_input_size / filter->dims->data[1];
  const int units = filter->dims->data[0];
  float* out_data = GetTensorData<float>(output);

  // Seed the output with the bias, or zeros when no bias tensor exists.
  if (bias) {
    tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias), units,
                                          batches, out_data);
  } else {
    std::fill_n(out_data, batches * units, 0.0f);
  }

  // Accumulate the matrix product, then apply the fused activation in place.
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      GetTensorData<float>(filter), units, input_depth,
      GetTensorData<float>(input), batches, out_data);
  tensor_utils::ApplyActivationToVector(out_data, batches * units,
                                        params->activation, out_data);
  return kTfLiteOk;
}
// Hybrid evaluation with a *dense* quantized weight matrix: float activations
// are quantized per batch to int8, multiplied against the int8 filter, and the
// accumulator is scaled back to float. `input_quantized`, `scaling_factors`,
// `accum_scratch`, `row_sums` and `input_offsets` are preallocated temporary
// tensors; `row_sums`/`input_offsets` are only consumed when asymmetric input
// quantization is enabled.
TfLiteStatus EvalHybridDense(
    TfLiteContext* context, TfLiteNode* node,
    TfLiteFullyConnectedParams* params, OpData* data, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias,
    TfLiteTensor* input_quantized, TfLiteTensor* scaling_factors,
    TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
    TfLiteTensor* input_offsets, TfLiteTensor* output) {
  // Total number of elements in the input tensor, across all dimensions.
  int total_input_size = 1;
  for (int i = 0; i < input->dims->size; i++) {
    total_input_size *= input->dims->data[i];
  }
  // filter dims: data[0] = num_units (rows), data[1] = input_size (columns).
  const int input_size = filter->dims->data[1];
  const int batch_size = total_input_size / filter->dims->data[1];
  const int num_units = filter->dims->data[0];
  // Output = bias if bias tensor exists.
  if (bias) {
    tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias), num_units,
                                          batch_size,
                                          GetTensorData<float>(output));
  } else {
    std::fill_n(GetTensorData<float>(output), batch_size * num_units, 0.0f);
  }
  // Save matrix multiplication computation for all zero input.
  if (tensor_utils::IsZeroVector(GetTensorData<float>(input),
                                 total_input_size)) {
    // Output currently holds just the bias; only the activation remains.
    tensor_utils::ApplyActivationToVector(
        GetTensorData<float>(output), batch_size * num_units,
        params->activation, GetTensorData<float>(output));
    return kTfLiteOk;
  }
  // Quantize input from float to uint8 + quantization params (scaling factor).
  float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
  int32_t* input_offset_ptr = nullptr;
  int32_t* row_sums_ptr = nullptr;
  if (params->asymmetric_quantize_inputs) {
    input_offset_ptr = GetTensorData<int32_t>(input_offsets);
    row_sums_ptr = GetTensorData<int32_t>(row_sums);
  }
  int8_t* quant_data = GetTensorData<int8_t>(input_quantized);
  const int8_t* filter_data = GetTensorData<int8_t>(filter);
  const float* input_ptr = GetTensorData<float>(input);
  tensor_utils::BatchQuantizeFloats(
      input_ptr, batch_size, input_size, quant_data, scaling_factors_ptr,
      input_offset_ptr, params->asymmetric_quantize_inputs);
  for (int b = 0; b < batch_size; ++b) {
    // Incorporate scaling of the filter.
    scaling_factors_ptr[b] *= filter->params.scale;
  }
  // Compute output += weight * quantized_input
  int32_t* scratch = GetTensorData<int32_t>(accum_scratch);
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      filter_data, num_units, input_size, quant_data, scaling_factors_ptr,
      batch_size, GetTensorData<float>(output), /*per_channel_scale=*/nullptr,
      input_offset_ptr, scratch, row_sums_ptr, &data->compute_row_sums,
      CpuBackendContext::GetFromContext(context));
  // Apply activation function to floats.
  tensor_utils::ApplyActivationToVector(
      GetTensorData<float>(output), batch_size * num_units, params->activation,
      GetTensorData<float>(output));
  return kTfLiteOk;
}
// Per-thread worker for the sparse hybrid kernel. Processes the batch slice
// [thread_start, thread_end) of the input/output tensors; all temporary
// tensors are shared across workers and indexed by `thread_start` so slices
// never overlap. Errors cannot be reported from here (void return), so all
// validation must happen before the workers are spawned.
void EvalSparseHybridImpl(TfLiteContext* context, TfLiteNode* node,
                          TfLiteFullyConnectedParams* params, OpData* data,
                          const TfLiteTensor* input, const TfLiteTensor* filter,
                          const TfLiteTensor* bias, int thread_start,
                          int thread_end, TfLiteTensor* input_quantized,
                          TfLiteTensor* scaling_factors,
                          TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
                          TfLiteTensor* input_offsets, TfLiteTensor* output) {
  ruy::profiler::ScopeLabel label("FullyConnected");
  ruy::profiler::ScopeLabel inner_label("Sparse Hybrid Kernel");
  const auto& input_shape = GetTensorShape(input);
  const auto& output_shape = GetTensorShape(output);
  const auto& filter_shape = GetTensorShape(filter);
  const int input_dims_count = input_shape.DimensionsCount();
  const int output_dims_count = output_shape.DimensionsCount();
  const int filter_dims_count = filter_shape.DimensionsCount();
  // This worker's share of the batch dimension.
  const int batch_size = thread_end - thread_start;
  const int input_depth = MatchingDim(filter_shape, filter_dims_count - 1,
                                      input_shape, input_dims_count - 1);
  const int output_depth = MatchingDim(filter_shape, filter_dims_count - 2,
                                       output_shape, output_dims_count - 1);
  const int per_thread_input_size = batch_size * input_depth;
  // Offsets into the shared tensors for this worker's batch slice.
  const float* per_thread_input =
      GetTensorData<float>(input) + thread_start * input_depth;
  float* per_thread_output =
      GetTensorData<float>(output) + thread_start * output_depth;
  // Output = bias if bias tensor exists.
  if (bias) {
    tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias),
                                          output_depth, batch_size,
                                          per_thread_output);
  } else {
    std::fill_n(per_thread_output, batch_size * output_depth, 0.0f);
  }
  // Save matrix multiplication computation for all zero input.
  if (tensor_utils::IsZeroVector(per_thread_input, per_thread_input_size)) {
    tensor_utils::ApplyActivationToVector(
        per_thread_output, batch_size * output_depth, params->activation,
        per_thread_output);
    return;
  }
  // Quantize input from float to uint8 + quantization params (scaling factor).
  float* scaling_factors_ptr =
      GetTensorData<float>(scaling_factors) + thread_start;
  int32_t* input_offset_ptr = nullptr;
  int32_t* row_sums_ptr = nullptr;
  if (params->asymmetric_quantize_inputs) {
    input_offset_ptr = GetTensorData<int32_t>(input_offsets) + thread_start;
    // row_sums is per weight row, not per batch, so it is NOT offset by
    // thread_start; all workers read the same precomputed values.
    row_sums_ptr = GetTensorData<int32_t>(row_sums);
  }
  int8_t* quant_data =
      GetTensorData<int8_t>(input_quantized) + thread_start * input_depth;
  tensor_utils::BatchQuantizeFloats(per_thread_input, batch_size, input_depth,
                                    quant_data, scaling_factors_ptr,
                                    input_offset_ptr,
                                    params->asymmetric_quantize_inputs);
  for (int b = 0; b < batch_size; ++b) {
    // Incorporate scaling of the filter.
    scaling_factors_ptr[b] *= filter->params.scale;
  }
  if (params->asymmetric_quantize_inputs) {
    // Subtract zero-point correction terms (scale * zp * row_sum) up front,
    // since the sparse matmul below does not take input offsets.
    float* per_thread_output_ptr = per_thread_output;
    for (int b = 0; b < batch_size; ++b) {
      const float scaled_zp = scaling_factors_ptr[b] * input_offset_ptr[b];
      for (int row = 0; row < output_depth; ++row) {
        *per_thread_output_ptr++ -= scaled_zp * row_sums_ptr[row];
      }
    }
  }
  // Compute output += weight * quantized_input
  // Temporary index 5 holds the sparsity ledger; populated in EvalHybrid.
  TfLiteTensor* filter_ledger = &context->tensors[node->temporaries->data[5]];
  tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
      GetTensorData<int8_t>(filter), GetTensorData<uint8_t>(filter_ledger),
      output_depth, input_depth, quant_data, scaling_factors_ptr, batch_size,
      per_thread_output);
  // Apply activation function to floats.
  tensor_utils::ApplyActivationToVector(per_thread_output,
                                        batch_size * output_depth,
                                        params->activation, per_thread_output);
}
// Threadpool task that runs EvalSparseHybridImpl over one batch slice.
// The task only borrows the pointers it is given; all of them must outlive
// cpu_backend_threadpool::Execute (guaranteed in EvalHybrid, which keeps the
// tasks and tensors alive on the calling stack).
struct SparseHybridFullyConnectedTask : cpu_backend_threadpool::Task {
  SparseHybridFullyConnectedTask(
      TfLiteContext* context, TfLiteNode* node,
      TfLiteFullyConnectedParams* params, OpData* data,
      const TfLiteTensor* input, const TfLiteTensor* filter,
      const TfLiteTensor* bias, const int thread_start, const int thread_end,
      TfLiteTensor* input_quantized, TfLiteTensor* scaling_factors,
      TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
      TfLiteTensor* input_offsets, TfLiteTensor* output)
      : context(context),
        node(node),
        params(params),
        data(data),
        input(input),
        filter(filter),
        bias(bias),
        thread_start(thread_start),
        thread_end(thread_end),
        input_quantized(input_quantized),
        scaling_factors(scaling_factors),
        accum_scratch(accum_scratch),
        row_sums(row_sums),
        input_offsets(input_offsets),
        output(output) {}
  // Invoked on a worker thread by the threadpool.
  void Run() override {
    EvalSparseHybridImpl(context, node, params, data, input, filter, bias,
                         thread_start, thread_end, input_quantized,
                         scaling_factors, accum_scratch, row_sums,
                         input_offsets, output);
  }
 private:
  TfLiteContext* context;
  TfLiteNode* node;
  TfLiteFullyConnectedParams* params;
  OpData* data;
  const TfLiteTensor* input;
  const TfLiteTensor* filter;
  const TfLiteTensor* bias;
  // Half-open batch range [thread_start, thread_end) assigned to this task.
  const int thread_start;
  const int thread_end;
  TfLiteTensor* input_quantized;
  TfLiteTensor* scaling_factors;
  TfLiteTensor* accum_scratch;
  TfLiteTensor* row_sums;
  TfLiteTensor* input_offsets;
  TfLiteTensor* output;
};
// Hybrid (float activations x quantized weights) entry point. Dense filters
// are handled inline by EvalHybridDense; sparse filters are sliced along the
// batch dimension and dispatched to the threadpool via
// SparseHybridFullyConnectedTask.
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
                        TfLiteFullyConnectedParams* params, OpData* data,
                        const TfLiteTensor* input, const TfLiteTensor* filter,
                        const TfLiteTensor* bias, TfLiteTensor* input_quantized,
                        TfLiteTensor* scaling_factors,
                        TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
                        TfLiteTensor* input_offsets, TfLiteTensor* output) {
  const auto& output_shape = GetTensorShape(output);
  CpuBackendContext* cpu_backend_context =
      CpuBackendContext::GetFromContext(context);
  const bool is_dense = filter->sparsity == nullptr;
  if (is_dense) {
    return EvalHybridDense(context, node, params, data, input, filter, bias,
                           input_quantized, scaling_factors, accum_scratch,
                           row_sums, input_offsets, output);
  }
  // Temporary index 5 holds the sparsity ledger for the sparse kernel.
  TfLiteTensor* filter_ledger = &context->tensors[node->temporaries->data[5]];
  if (!data->ledger_initialized) {
    // The ledger is derived only from the (constant) filter sparsity, so it
    // is built once and reused across invocations.
    PopulateLedgerData(filter->sparsity, context,
                       GetTensorData<uint8_t>(filter_ledger));
    data->ledger_initialized = true;
  }
  // The multi-threaded kernel slices the workload along the batch dimension. If
  // there's not enough batches of data, the number of threads used is equal to
  // the batch size.
  // TODO(b/173442777): If needed, we can improve this later with slicing along
  // the row dimension of the weight.
  const int max_threads = cpu_backend_context->max_num_threads();
  const int batches =
      FlatSizeSkipDim(output_shape, output_shape.DimensionsCount() - 1);
  const int thread_count = std::max(1, std::min(batches, max_threads));
  if (params->asymmetric_quantize_inputs && data->compute_row_sums) {
    // Precompute row sums.
    // The ledger format is, per weight row: a count of non-zero blocks
    // followed by that many block indices; each block holds kBlockSize
    // contiguous weights. Row sums only change when the filter changes, so
    // compute them once and cache via data->compute_row_sums.
    static const int kBlockSize = 16;
    const uint8_t* ledger_ptr = GetTensorData<uint8_t>(filter_ledger);
    const int8_t* row_ptr = GetTensorData<int8_t>(filter);
    const int output_depth = filter->dims->data[0];
    int32_t* row_sums_ptr = GetTensorData<int32_t>(row_sums);
    for (int row = 0; row < output_depth; ++row) {
      int32_t row_sum = 0;
      int num_nonzero_blocks = *ledger_ptr++;
      for (int i = 0; i < num_nonzero_blocks; ++i, ++ledger_ptr) {
        for (int c = 0; c < kBlockSize; c++) {
          row_sum += (*row_ptr++);
        }
      }
      row_sums_ptr[row] = row_sum;
    }
    data->compute_row_sums = false;
  }
  std::vector<SparseHybridFullyConnectedTask> tasks;
  tasks.reserve(thread_count);
  int thread_start = 0;
  for (int i = 0; i < thread_count; ++i) {
    // This makes sure the workload is relatively balanced when batches is not
    // a multiple of thread_count. The first mod(batches, thread_count) tasks
    // need to process one more batch than the rest.
    int thread_end = thread_start + batches / thread_count;
    if (i < batches % thread_count) thread_end++;
    tasks.emplace_back(context, node, params, data, input, filter, bias,
                       thread_start, thread_end, input_quantized,
                       scaling_factors, accum_scratch, row_sums, input_offsets,
                       output);
    thread_start = thread_end;
  }
  // Blocks until every task has finished.
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                  cpu_backend_context);
  return kTfLiteOk;
}
namespace {
// Fully-quantized int8 path: dispatches to the reference or optimized
// per-tensor integer kernel using the multipliers/shifts precomputed into
// OpData at Prepare time.
template <KernelType kernel_type>
void FullyConnectedInt8(const OpData* data, const TfLiteTensor* input,
                        const TfLiteTensor* filter, const TfLiteTensor* bias,
                        TfLiteTensor* output,
                        CpuBackendContext* cpu_backend_context) {
  FullyConnectedParams op_params;
  // Offsets are negated zero points, per the integer kernel convention.
  op_params.input_offset = -input->params.zero_point;
  op_params.weights_offset = -filter->params.zero_point;
  op_params.output_offset = output->params.zero_point;
  op_params.output_multiplier = data->output_multiplier;
  op_params.output_shift = data->output_shift;
  op_params.quantized_activation_min = data->output_activation_min;
  op_params.quantized_activation_max = data->output_activation_max;
  // Constant operands allow the GEMM backend to cache packed matrices.
  op_params.lhs_cacheable = IsConstantTensor(filter);
  op_params.rhs_cacheable = IsConstantTensor(input);
  if (kernel_type == kReference) {
    reference_integer_ops::FullyConnected(
        op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
        GetTensorShape(filter), GetTensorData<int8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int8_t>(output));
  } else {
    optimized_integer_ops::FullyConnected(
        op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
        GetTensorShape(filter), GetTensorData<int8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int8_t>(output),
        cpu_backend_context);
  }
}
}  // namespace
namespace {
// Fully-quantized int16 path (int16 activations, int8 weights, int64 bias);
// reference kernel only.
// NOTE(review): op_params.input_offset is never set here — presumably the
// int16 kernel assumes a zero input zero-point (as the int16 quantization
// scheme requires); confirm the reference kernel ignores input_offset.
template <KernelType kernel_type>
void FullyConnectedInt16(const OpData* data, const TfLiteTensor* input,
                         const TfLiteTensor* filter, const TfLiteTensor* bias,
                         TfLiteTensor* output) {
  FullyConnectedParams op_params;
  op_params.weights_offset = -filter->params.zero_point;
  op_params.output_multiplier = data->output_multiplier;
  op_params.output_shift = data->output_shift;
  op_params.quantized_activation_min = data->output_activation_min;
  op_params.quantized_activation_max = data->output_activation_max;
  reference_integer_ops::FullyConnected(
      op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
      GetTensorShape(filter), GetTensorData<int8_t>(filter),
      GetTensorShape(bias), GetTensorData<int64_t>(bias),
      GetTensorShape(output), GetTensorData<int16_t>(output));
}
}  // namespace
// Evaluation entry point for quantized weights. Float input selects the
// hybrid path (dequantize-free integer matmul with float rescale); otherwise
// dispatches the fully-quantized kernels by output type (uint8/int8/int16).
template <KernelType kernel_type>
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                           TfLiteFullyConnectedParams* params, OpData* data,
                           const TfLiteTensor* input,
                           const TfLiteTensor* filter, const TfLiteTensor* bias,
                           TfLiteTensor* output) {
  // Offsets are negated zero points (output keeps its sign), per the
  // quantized kernel convention.
  int32_t input_offset = -input->params.zero_point;
  int32_t filter_offset = -filter->params.zero_point;
  int32_t output_offset = output->params.zero_point;
  // Only the Pie path supports quantized models and float inputs/outputs.
  if (input->type == kTfLiteFloat32) {
    // Temporaries 0-4 were allocated at Prepare time for the hybrid path.
    TfLiteTensor* input_quantized;
    TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/0,
                                                &input_quantized));
    TfLiteTensor* scaling_factors;
    TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
                                                &scaling_factors));
    TfLiteTensor* accum_scratch;
    TF_LITE_ENSURE_OK(
        context, GetTemporarySafe(context, node, /*index=*/2, &accum_scratch));
    TfLiteTensor* input_offsets;
    TF_LITE_ENSURE_OK(
        context, GetTemporarySafe(context, node, /*index=*/3, &input_offsets));
    TfLiteTensor* row_sums;
    TF_LITE_ENSURE_OK(context,
                      GetTemporarySafe(context, node, /*index=*/4, &row_sums));
    return EvalHybrid(context, node, params, data, input, filter, bias,
                      input_quantized, scaling_factors, accum_scratch, row_sums,
                      input_offsets, output);
  } else {
    FullyConnectedParams op_params;
    op_params.input_offset = input_offset;
    op_params.weights_offset = filter_offset;
    op_params.output_offset = output_offset;
    op_params.output_multiplier = data->output_multiplier;
    op_params.output_shift = data->output_shift;
    op_params.quantized_activation_min = data->output_activation_min;
    op_params.quantized_activation_max = data->output_activation_max;
    // Constant operands allow the GEMM backend to cache packed matrices.
    op_params.lhs_cacheable = IsConstantTensor(filter);
    op_params.rhs_cacheable = IsConstantTensor(input);
    switch (output->type) {
      case kTfLiteUInt8:
        if (kernel_type == kReference) {
          reference_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<uint8_t>(output));
        } else {
          optimized_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<uint8_t>(output),
              CpuBackendContext::GetFromContext(context));
        }
        break;
      case kTfLiteInt8:
        FullyConnectedInt8<kernel_type>(
            data, input, filter, bias, output,
            CpuBackendContext::GetFromContext(context));
        break;
      case kTfLiteInt16:
        if (input->type == kTfLiteInt16) {
          // To avoid 32bit accum overflow, it enables RUY only
          // when zero_point is 0.
          bool has_non_zero_point = input->params.zero_point ||
                                    filter->params.zero_point ||
                                    output->params.zero_point;
          if (kernel_type == kReference || has_non_zero_point ||
              (bias && bias->type == kTfLiteInt64)) {
            FullyConnectedInt16<kernel_type>(data, input, filter, bias, output);
          } else {
            optimized_integer_ops::FullyConnected(
                op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
                GetTensorShape(filter), GetTensorData<int8_t>(filter),
                GetTensorShape(bias), GetTensorData<int32_t>(bias),
                GetTensorShape(output), GetTensorData<int16_t>(output),
                CpuBackendContext::GetFromContext(context));
          }
        } else if (kernel_type == kReference) {
          // Legacy uint8-input / int16-output combination.
          reference_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<int16_t>(output));
        } else {
          optimized_ops::FullyConnected(
              op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
              GetTensorShape(filter), GetTensorData<uint8_t>(filter),
              GetTensorShape(bias), GetTensorData<int32_t>(bias),
              GetTensorShape(output), GetTensorData<int16_t>(output),
              CpuBackendContext::GetFromContext(context));
        }
        break;
      default:
        context->ReportError(context,
                             "Quantized FullyConnected expects output data "
                             "type uint8, int8 or int16");
        return kTfLiteError;
    }
  }
  return kTfLiteOk;
}
// Evaluation for the Shuffled4x16Int8 weights format: weights are
// pre-shuffled into 4x16 blocks and the kernel shuffles the input into
// `shuffled_input_workspace` (which must be uint8) before the integer matmul.
// Output is int16.
//
// Fix: the previous version defined a TF_LITE_SHUFFLED_FULLY_CONNECTED macro
// that was never expanded (both dispatch branches spelled the call out
// explicitly) — the dead macro definition and its #undef are removed.
template <KernelType kernel_type>
TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node,
                                   TfLiteFullyConnectedParams* params,
                                   OpData* data, const TfLiteTensor* input,
                                   const TfLiteTensor* filter,
                                   const TfLiteTensor* bias,
                                   TfLiteTensor* output,
                                   TfLiteTensor* shuffled_input_workspace) {
  // TODO(b/110697972) decide more consistently if / how / where we want
  // to perform this kind of runtime data type checks.
  if (shuffled_input_workspace->type != kTfLiteUInt8) {
    context->ReportError(context, "Unexpected data type");
    return kTfLiteError;
  }
  FullyConnectedParams op_params;
  op_params.output_multiplier = data->output_multiplier;
  op_params.output_shift = data->output_shift;
  op_params.quantized_activation_min = data->output_activation_min;
  op_params.quantized_activation_max = data->output_activation_max;
  // Constant operands allow the GEMM backend to cache packed matrices.
  op_params.lhs_cacheable = IsConstantTensor(filter);
  op_params.rhs_cacheable = IsConstantTensor(input);
  if (kernel_type == kReference) {
    reference_ops::ShuffledFullyConnected(
        op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
        GetTensorShape(filter), GetTensorData<uint8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int16_t>(output),
        GetTensorData<uint8_t>(shuffled_input_workspace));
  } else {
    optimized_ops::ShuffledFullyConnected(
        op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
        GetTensorShape(filter), GetTensorData<uint8_t>(filter),
        GetTensorShape(bias), GetTensorData<int32_t>(bias),
        GetTensorShape(output), GetTensorData<int16_t>(output),
        GetTensorData<uint8_t>(shuffled_input_workspace),
        CpuBackendContext::GetFromContext(context));
  }
  return kTfLiteOk;
}
// Verifies that sparsity values are valid given input/weight/output.
// Returns false when the sparse metadata could index out of bounds of either
// the output buffer or the input buffer, so callers can reject malformed
// (possibly adversarial) sparse models before running the kernel.
bool VerifySparsity(const RuntimeShape& weights_shape,
                    const RuntimeShape& input_shape,
                    const RuntimeShape& output_shape,
                    const TfLiteSparsity* sparsity) {
  const int weights_dims_count = weights_shape.DimensionsCount();
  const int output_dims_count = output_shape.DimensionsCount();
  // Dense size of the weights' first (output-unit) dimension.
  const int w0_size = sparsity->dim_metadata[0].dense_size;
  const int accum_depth = weights_shape.Dims(weights_dims_count - 1);
  const int output_elements = output_shape.FlatSize();
  const int input_elements = input_shape.FlatSize();
  const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2,
                                       output_shape, output_dims_count - 1);
  const int max_batch_index = batches - 1;
  // Largest output index the kernel could write, given the sparse metadata.
  const int max_output = max_batch_index * output_depth + w0_size;
  const int max_batch_depth = accum_depth * max_batch_index;
  // Verify output size is enough.
  if (output_elements < max_output) return false;
  // Verify index from sparse in input is valid.
  for (int i = 0; i < sparsity->dim_metadata[1].array_indices->size; ++i) {
    if (input_elements <=
        max_batch_depth + sparsity->dim_metadata[1].array_indices->data[i])
      return false;
  }
  return true;
}
// Float evaluation entry point. Routes to (a) the reference kernels, (b) the
// legacy Pie path, or (c) the optimized kernels; within (a) and (c), sparse
// weights take dedicated sparse kernels (random sparse or 1x4 block sparse).
template <KernelType kernel_type>
TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
                       TfLiteFullyConnectedParams* params, OpData* data,
                       const TfLiteTensor* input, const TfLiteTensor* filter,
                       const TfLiteTensor* bias, TfLiteTensor* output) {
  float output_activation_min, output_activation_max;
  CalculateActivationRange(params->activation, &output_activation_min,
                           &output_activation_max);
  if (kernel_type == kReference) {
    FullyConnectedParams op_params;
    op_params.float_activation_min = output_activation_min;
    op_params.float_activation_max = output_activation_max;
    if (filter->sparsity != nullptr) {
      const auto& sparsity = *filter->sparsity;
      reference_ops::FullyConnectedSparseWeight(
          sparsity, op_params, GetTensorShape(input),
          GetTensorData<float>(input), GetTensorShape(filter),
          GetTensorData<float>(filter), GetTensorShape(bias),
          GetTensorData<float>(bias), GetTensorShape(output),
          GetTensorData<float>(output));
    } else {
      reference_ops::FullyConnected(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(filter), GetTensorData<float>(filter),
          GetTensorShape(bias), GetTensorData<float>(bias),
          GetTensorShape(output), GetTensorData<float>(output));
    }
  } else if (kernel_type == kLegacyPie) {
    return EvalPie(context, node, params, data, input, filter, bias, output);
  } else {
    FullyConnectedParams op_params;
    op_params.float_activation_min = output_activation_min;
    op_params.float_activation_max = output_activation_max;
    if (filter->sparsity != nullptr) {
      const auto& sparsity = *filter->sparsity;
      if (!SupportedSparsityFormat(sparsity)) {
        TF_LITE_KERNEL_LOG(context,
                           "Unsupported sparse fully-connected weight format.");
        return kTfLiteError;
      }
      const auto& input_shape = GetTensorShape(input);
      const auto& filter_shape = GetTensorShape(filter);
      const auto& output_shape = GetTensorShape(output);
      const auto& bias_shape = GetTensorShape(bias);
      // Reject sparse metadata whose indices could read/write out of bounds.
      if (!VerifySparsity(filter_shape, input_shape, output_shape, &sparsity)) {
        TF_LITE_KERNEL_LOG(context, "Invalid sparse fully-connected format.");
        return kTfLiteError;
      }
      if (sparsity.dim_metadata_size == kDimMetadataSizeRandomSparse) {
        // Random sparse.
        optimized_ops::FullyConnectedSparseWeight(
            sparsity, op_params,  // Disable formatting
            input_shape, GetTensorData<float>(input),  // Disable formatting
            filter_shape, GetTensorData<float>(filter),  // Disable formatting
            bias_shape, GetTensorData<float>(bias),  // Disable formatting
            output_shape, GetTensorData<float>(output));
      } else if (sparsity.dim_metadata_size == kDimMetadataSizeBlockSparse &&
                 sparsity.dim_metadata[2].dense_size == 4) {
        // Block sparse with block size of 1x4.
        optimized_ops::FullyConnectedSparseWeight1x4(
            sparsity, op_params,  // Disable formatting
            input_shape, GetTensorData<float>(input),  // Disable formatting
            filter_shape, GetTensorData<float>(filter),  // Disable formatting
            bias_shape, GetTensorData<float>(bias),  // Disable formatting
            output_shape, GetTensorData<float>(output),
            CpuBackendContext::GetFromContext(context));
      } else {
        TF_LITE_KERNEL_LOG(context,
                           "Unsupported sparse fully-connected weight format.");
        return kTfLiteError;
      }
    } else {
      // Constant operands allow the GEMM backend to cache packed matrices.
      op_params.lhs_cacheable = IsConstantTensor(filter);
      op_params.rhs_cacheable = IsConstantTensor(input);
      optimized_ops::FullyConnected(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(filter), GetTensorData<float>(filter),
          GetTensorShape(bias), GetTensorData<float>(bias),
          GetTensorShape(output), GetTensorData<float>(output),
          CpuBackendContext::GetFromContext(context));
    }
  }
  return kTfLiteOk;
}
// Top-level Eval for the fully-connected op: fetches the operands, then
// dispatches on the *filter* type (the filter, not the input, determines
// which evaluation family applies).
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const TfLiteTensor* filter;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kWeightsTensor, &filter));
  // Bias is optional; it is only present when the node declares 3 inputs.
  const TfLiteTensor* bias =
      (node->inputs->size == 3)
          ? GetOptionalInputTensor(context, node, kBiasTensor)
          : nullptr;
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Do nothing if expected output is empty.
  if (NumElements(output) == 0) {
    return kTfLiteOk;
  }
  switch (filter->type) {
    case kTfLiteFloat32:
      return EvalFloat<kernel_type>(context, node, params, data, input, filter,
                                    bias, output);
    case kTfLiteUInt8:
      // uint8 weights support both the default and the shuffled 4x16 format.
      if (params->weights_format ==
          kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
        TfLiteTensor* shuffled_input_workspace;
        TF_LITE_ENSURE_OK(
            context, GetOutputSafe(context, node, kShuffledInputWorkspaceTensor,
                                   &shuffled_input_workspace));
        return EvalShuffledQuantized<kernel_type>(context, node, params, data,
                                                  input, filter, bias, output,
                                                  shuffled_input_workspace);
      } else if (params->weights_format ==
                 kTfLiteFullyConnectedWeightsFormatDefault) {
        return EvalQuantized<kernel_type>(context, node, params, data, input,
                                          filter, bias, output);
      } else {
        context->ReportError(context,
                             "Unhandled fully-connected weights format");
        return kTfLiteError;
      }
    case kTfLiteInt8:
      // int8 weights only support the default format.
      if (params->weights_format == kTfLiteFullyConnectedWeightsFormatDefault) {
        return EvalQuantized<kernel_type>(context, node, params, data, input,
                                          filter, bias, output);
      } else {
        context->ReportError(context,
                             "Unhandled fully-connected weights format");
        return kTfLiteError;
      }
    default:
      context->ReportError(context,
                           "Filter data type %s currently not supported.",
                           TfLiteTypeGetName(filter->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
} // namespace fully_connected
// Registration using the portable reference kernels.
TfLiteRegistration* Register_FULLY_CONNECTED_REF() {
  static TfLiteRegistration r = {
      fully_connected::Init, fully_connected::Free,
      fully_connected::Prepare<fully_connected::kReference>,
      fully_connected::Eval<fully_connected::kReference>};
  return &r;
}
// Registration using the generic optimized (CPU backend / ruy) kernels.
TfLiteRegistration* Register_FULLY_CONNECTED_GENERIC_OPT() {
  static TfLiteRegistration r = {
      fully_connected::Init, fully_connected::Free,
      fully_connected::Prepare<fully_connected::kGenericOptimized>,
      fully_connected::Eval<fully_connected::kGenericOptimized>};
  return &r;
}
// Legacy path for PIE clients. Uses the tensor_utils-based EvalPie float path
// and allows all fused activations (see Prepare).
TfLiteRegistration* Register_FULLY_CONNECTED_PIE() {
  static TfLiteRegistration r = {
      fully_connected::Init, fully_connected::Free,
      fully_connected::Prepare<fully_connected::kLegacyPie>,
      fully_connected::Eval<fully_connected::kLegacyPie>};
  return &r;
}
// Default registration: the generic optimized variant.
TfLiteRegistration* Register_FULLY_CONNECTED() {
  return Register_FULLY_CONNECTED_GENERIC_OPT();
}
} // namespace builtin
} // namespace ops
} // namespace tflite
| Intel-Corporation/tensorflow | tensorflow/lite/kernels/fully_connected.cc | C++ | apache-2.0 | 50,508 |
package com.wistronits.wh.annbot.business.biz.modules;
import com.wistronits.wh.annbot.business.mvp.contract.MainContract;
import com.wistronits.wh.annbot.business.mvp.contract.NurseListContract;
import dagger.Module;
import dagger.Provides;
/**
 * Dagger module that binds the {@link NurseListContract.View} instance for a
 * nurse-list screen into the injection graph.
 *
 * <p>Created by WH1705002 on 2017/9/1.
 */
@Module
public class NurseModule {

    /** View captured at construction time and handed out by the provider. */
    private final NurseListContract.View view;

    public NurseModule(NurseListContract.View view) {
        this.view = view;
    }

    /** Supplies the view to injection targets requesting it. */
    @Provides
    NurseListContract.View provideNurseContract() {
        return view;
    }
}
| WiAnnBot/annbot | app/src/main/java/com/wistronits/wh/annbot/business/biz/modules/NurseModule.java | Java | apache-2.0 | 547 |
#region -- License Terms --
//
// MessagePack for CLI
//
// Copyright (C) 2010-2012 FUJIWARA, Yusuke
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#endregion -- License Terms --
using System;
using System.Diagnostics;
using System.IO;
using MsgPack;
using MsgPack.Serialization;
using NUnit.Framework; // For running checking
namespace Samples
{
	/// <summary>
	///	Sample code demonstrating how to explore a raw <c>MessagePackObject</c> tree.
	/// </summary>
	[TestFixture]
	public class HandlingDynamicObjectSample
	{
		/// <summary>
		///	Serializes a sample object with map encoding, inspects the raw
		///	<c>MessagePackObject</c> representation, then round-trips the object
		///	via <c>ToMessagePackObject</c> / <c>FromMessagePackObject</c>.
		/// </summary>
		[Test]
		public void SerializeThenDeserialize()
		{
			// They are object for just description.
			var targetObject =
				new PhotoEntry // See Sample01_BasicUsage.cs
				{
					Id = 123,
					Title = "My photo",
					Date = DateTime.Now,
					Image = new byte[] { 1, 2, 3, 4 },
					Comment = "This is test object to be serialize/deserialize using MsgPack."
				};
			targetObject.Tags.Add( "Sample" );
			targetObject.Tags.Add( "Excellent" );
			var stream = new MemoryStream();
			// Set using Map instead of Array to serialize complex object. See Sample03 for details.
			var context = new SerializationContext();
			context.SerializationMethod = SerializationMethod.Map;
			// You can use default context if you want to use map in all serializations which use default context.
			// SerializationContext.Default.SerializationMethod = SerializationMethod.Map;
			// 1. Create serializer instance.
			var serializer = MessagePackSerializer.Get<PhotoEntry>( context );
			// 2. Serialize object to the specified stream.
			serializer.Pack( stream, targetObject );
			// Set position to head of the stream to demonstrate deserialization.
			stream.Position = 0;
			// 3. Unpack MessagePackObject to get raw representation.
			var rawObject = Unpacking.UnpackObject( stream );
			// 3-b. You can read MPO tree via Unpacker
			// var unpacker = Unpacker.Create( stream );
			// 3-c. Or, you can get it from serializer directly.
			// var rawObject = MessagePackSerializer.UnpackMessagePackObject( stream );
			// Check its type
			Debug.WriteLine( "Is array? {0}", rawObject.IsArray ); // IsList is alias
			Debug.WriteLine( "Is map? {0}", rawObject.IsMap ); // IsDictionary is alias
			Debug.WriteLine( "Type: {0}", rawObject.UnderlyingType );
			// Gets serialized fields.
			// Note: When the object was serialized as array instead of map, use index instead.
			var asDictionary = rawObject.AsDictionary();
			Debug.WriteLine( "Id : {0}({1})", asDictionary[ "Id" ], asDictionary[ "Id" ].UnderlyingType );
			// String is encoded as utf-8 by default.
			Debug.WriteLine( "Title : {0}({1})", asDictionary[ "Title" ], asDictionary[ "Title" ].UnderlyingType );
			// Non-primitive is serialized as complex type or encoded primitive type.
			// DateTimeOffset is encoded as array[2]{ticks,offset}
			Debug.WriteLine( "Date : {0}({1})", asDictionary[ "Date" ], asDictionary[ "Date" ].UnderlyingType );
			// byte[] is byte[], as you know.
			Debug.WriteLine( "Image : {0}({1})", asDictionary[ "Image" ], asDictionary[ "Image" ].UnderlyingType );
			// 4. Now MessagePackSerializer handles MessagePackObject directly.
			var mpo = serializer.ToMessagePackObject( targetObject );
			var asDictionary2 = mpo.AsDictionary();
			Debug.WriteLine( "---- ToMessagePackObject ----" );
			Debug.WriteLine( "Id : {0}({1})", asDictionary2[ "Id" ], asDictionary2[ "Id" ].UnderlyingType );
			Debug.WriteLine( "Title : {0}({1})", asDictionary2[ "Title" ], asDictionary2[ "Title" ].UnderlyingType );
			Debug.WriteLine( "Date : {0}({1})", asDictionary2[ "Date" ], asDictionary2[ "Date" ].UnderlyingType );
			Debug.WriteLine( "Image : {0}({1})", asDictionary2[ "Image" ], asDictionary2[ "Image" ].UnderlyingType );
			// 5. Use MessagePackSerializer to deserialize target object from MessagePackObject
			var targetObject2 = serializer.FromMessagePackObject( mpo );
			Debug.WriteLine( "---- FromMessagePackObject ----" );
			Debug.WriteLine( "Id : {0}", targetObject2.Id );
			Debug.WriteLine( "Title : {0}", targetObject2.Title );
			Debug.WriteLine( "Date : {0}", targetObject2.Date );
			Debug.WriteLine( "Image : {0}", Convert.ToBase64String( targetObject2.Image ) );
		}
	}
}
| msgpack/msgpack-cli | samples/Samples/Sample02_HandlingDynamicObject.cs | C# | apache-2.0 | 4,711 |
package com.keeps.crm.service.impl;
import java.text.DecimalFormat;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.keeps.core.service.AbstractService;
import com.keeps.crm.dao.BuyRecordDao;
import com.keeps.crm.service.BuyRecordService;
import com.keeps.model.TBuyRecord;
import com.keeps.tools.exception.CapecException;
import com.keeps.tools.utils.Assert;
import com.keeps.tools.utils.DateUtils;
import com.keeps.tools.utils.StringUtils;
import com.keeps.tools.utils.page.Page;
import com.keeps.tools.utils.threadlocal.UserSchoolThread;
/**
* <p>Title: BuyRecordServiceImpl.java</p>
* <p>Description: 客户购买记录Service实现类 </p>
* <p>Copyright: Copyright (c) KEEPS</p>
* @author keeps
* @version v 1.00
* @date 创建日期:2017年6月23日
* 修改日期:
* 修改人:
* 复审人:
*/
@Service
public class BuyRecordServiceImpl extends AbstractService implements BuyRecordService {

	@Autowired
	private BuyRecordDao buyRecordDao;

	/**
	 * Queries the paged purchase-record list; the DAO is told whether the
	 * current user is a super admin.
	 */
	@Override
	public Page queryList(TBuyRecord buyRecord) {
		return buyRecordDao.queryList(buyRecord, UserSchoolThread.get().isSuperAdmin());
	}

	/**
	 * Queries the paged purchase-stream list. Non-admin users are
	 * restricted to the records they created themselves.
	 */
	@Override
	public Page queryStreamList(TBuyRecord buyRecord) {
		if (!UserSchoolThread.get().isSuperAdmin()) {
			buyRecord.setEmpid(UserSchoolThread.get().getUserid());
		}
		return buyRecordDao.queryStreamList(buyRecord, UserSchoolThread.get().isSuperAdmin());
	}

	/**
	 * Queries the paged purchase statistics. Non-admin users are
	 * restricted to the records they created themselves.
	 */
	public Page queryStatisticsList(TBuyRecord buyRecord) {
		if (!UserSchoolThread.get().isSuperAdmin()) {
			buyRecord.setEmpid(UserSchoolThread.get().getUserid());
		}
		return buyRecordDao.queryStatisticsList(buyRecord, UserSchoolThread.get().isSuperAdmin());
	}

	/**
	 * Validates and saves a purchase record.
	 *
	 * <p>The product name arrives in the form {@code name¥price}; the two
	 * parts are split apart, the unit price is parsed, and the total price
	 * is computed as {@code price * buynum} formatted to two decimal places.
	 *
	 * @param buyRecord the record to save; must carry a client id, a product
	 *                  name with an embedded price, and a positive quantity
	 * @return always {@code null} (kept for interface compatibility)
	 */
	@Override
	public String saveOrUpdate(TBuyRecord buyRecord) {
		Assert.isTrue(buyRecord.getClientid() != null, "客户id不能为空!");
		if (StringUtils.notText(buyRecord.getUpdatetimestr())) {
			buyRecord.setUpdatetimestr(DateUtils.formatNow());
		}
		Assert.isTrue(StringUtils.hasText(buyRecord.getProductname()), "产品名称不允许为空!");
		String[] nameprices = buyRecord.getProductname().split("¥");
		// Guard against a missing price part: without this check the code
		// below failed with an ArrayIndexOutOfBoundsException instead of a
		// meaningful validation message.
		Assert.isTrue(nameprices.length >= 2, "产品价格不允许为空!");
		buyRecord.setProductname(nameprices[0]);
		Assert.isTrue(buyRecord.getBuynum() != null, "数量不能为空");
		Assert.isTrue(buyRecord.getBuynum() > 0, "数量不能小于0");
		DecimalFormat df = new DecimalFormat("#.##");
		try {
			buyRecord.setPrice(Float.parseFloat(nameprices[1]));
			buyRecord.setTotalprice(Float.parseFloat(df.format(buyRecord.getPrice() * buyRecord.getBuynum())));
		} catch (NumberFormatException e) {
			// Previously thrown with an empty message, which hid the cause.
			throw new CapecException("产品价格格式不正确!");
		}
		Assert.isTrue(StringUtils.hasText(buyRecord.getUpdatetimestr()), "购买时间不允许为空!");
		buyRecord.setEmpid(UserSchoolThread.get().getUserid());
		buyRecord.setUpdatetime(DateUtils.parse("yyyy-MM-dd HH:mm", buyRecord.getUpdatetimestr()));
		super.save(buyRecord);
		return null;
	}
}
| keepsl/keepsmis | crm/src/main/java/com/keeps/crm/service/impl/BuyRecordServiceImpl.java | Java | apache-2.0 | 3,106 |
/**
* This file is part of log4j2redis
*
* Copyright (c) 2012 by Pavlo Baron (pb at pbit dot org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Pavlo Baron <pb at pbit dot org>
* @author Landro Silva
* @copyright 2012 Pavlo Baron */
package test.org.pbit.log4j2redis;
import org.apache.log4j.Logger;
public class Log4j2RedisTest {

    /** Worker thread that floods its own logger with WARN-level messages. */
    public static class LogThread extends Thread {
        Logger log = Logger.getLogger("LogThread");

        @Override
        public void run() {
            // Emit 10000 numbered warnings from this worker thread.
            for (long n = 0L; n < 10000L; n++) {
                log.warn("whatever " + n);
            }
        }
    }

    static Logger log = Logger.getLogger("LogMainThread");

    public static void main(String[] args) {
        // Spin up nine concurrent workers ...
        int workers = 9;
        while (workers-- > 0) {
            new Log4j2RedisTest.LogThread().start();
        }
        // ... while the main thread emits 10000 numbered errors itself.
        for (long n = 0L; n < 10000L; n++) {
            log.error("that's me " + n);
        }
    }
}
| pavlobaron/log4j2redis | src/test/org/pbit/log4j2redis/Log4j2RedisTest.java | Java | apache-2.0 | 1,410 |
/*!
* ${copyright}
*/
sap.ui.define([
"sap/ui/test/_OpaLogger",
"sap/ui/base/ManagedObject"
], function (_OpaLogger, ManagedObject) {
"use strict";
/**
* @class Matchers for Opa5 - needs to implement an isMatching function that returns a boolean and will get a control instance as parameter
* @abstract
* @extends sap.ui.base.ManagedObject
* @public
* @name sap.ui.test.matchers.Matcher
* @author SAP SE
* @since 1.23
*/
var Matcher = ManagedObject.extend("sap.ui.test.matchers.Matcher", {
metadata : {
publicMethods : [ "isMatching" ]
},
constructor: function () {
this._oLogger = _OpaLogger.getLogger(this.getMetadata().getName());
return ManagedObject.prototype.constructor.apply(this, arguments);
},
/**
* Checks if the matcher is matching - will get an instance of sap.ui.core.Control as parameter.
*
* Should be overwritten by subclasses
*
* @param {sap.ui.core.Control} oControl the control that is checked by the matcher
* @return {boolean} true if the Control is matching the condition of the matcher
* @protected
* @name sap.ui.test.matchers.Matcher#isMatching
* @function
*/
isMatching : function (oControl) {
return true;
},
/**
* @return {object} window of the application under test, or the current window if OPA5 is not loaded
* Note: declared matchers are instanciated in the app context (by MatcherFactory)
* while users instanciate matchers in the test context (in a waitFor)
* @private
* @function
*/
_getApplicationWindow: function () {
if (sap.ui.test && sap.ui.test.Opa5) {
// matcher context === test context, because Opa5 is loadded
return sap.ui.test.Opa5.getWindow();
} else {
// matcher context === app context
return window;
}
}
});
return Matcher;
}); | SAP/openui5 | src/sap.ui.core/src/sap/ui/test/matchers/Matcher.js | JavaScript | apache-2.0 | 1,818 |
package config
// GitManager groups the configured git entries into local and remote
// lists (see the GitLocal and GitRemote definitions for the per-entry
// fields).
type GitManager struct {
	Local []*GitLocal
	Remote []*GitRemote
}
| flant/dapp | pkg/config/git_manager.go | GO | apache-2.0 | 84 |
///===--- Actor.cpp - Standard actor implementation ------------------------===///
///
/// This source file is part of the Swift.org open source project
///
/// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
/// Licensed under Apache License v2.0 with Runtime Library Exception
///
/// See https:///swift.org/LICENSE.txt for license information
/// See https:///swift.org/CONTRIBUTORS.txt for the list of Swift project authors
///
///===----------------------------------------------------------------------===///
///
/// The default actor implementation for Swift actors, plus related
/// routines such as generic executor enqueuing and switching.
///
///===----------------------------------------------------------------------===///
#include "swift/Runtime/Concurrency.h"
#include "swift/Runtime/Atomic.h"
#include "swift/Runtime/Mutex.h"
#include "swift/Runtime/ThreadLocal.h"
#include "swift/ABI/Actor.h"
#include "llvm/ADT/PointerIntPair.h"
using namespace swift;
/// Should we yield the thread?
static bool shouldYieldThread() {
  // FIXME: system scheduler integration; until that exists there is no
  // cooperative-yield signal, so this always answers "no".
  return false;
}
/*****************************************************************************/
/*********************** DEFAULT ACTOR IMPLEMENTATION ************************/
/*****************************************************************************/
namespace {
class DefaultActorImpl;
/// A job to process a default actor. Allocated inline in the actor.
class ProcessInlineJob : public Job {
public:
  ProcessInlineJob(JobPriority priority)
    : Job({JobKind::DefaultActorInline, priority}, &process) {}

  /// Executor entry point for this job kind.
  SWIFT_CC(swiftasync)
  static void process(Job *job, ExecutorRef executor);

  /// LLVM-style RTTI support.
  static bool classof(const Job *job) {
    return job->Flags.getKind() == JobKind::DefaultActorInline;
  }
};
/// A job to process a default actor that's allocated separately from
/// the actor but doesn't need the override mechanics.
class ProcessOutOfLineJob : public Job {
  /// The actor this job will process.
  DefaultActorImpl *Actor;

public:
  ProcessOutOfLineJob(DefaultActorImpl *actor, JobPriority priority)
    : Job({JobKind::DefaultActorSeparate, priority}, &process),
      Actor(actor) {}

  /// Executor entry point for this job kind.
  SWIFT_CC(swiftasync)
  static void process(Job *job, ExecutorRef executor);

  /// LLVM-style RTTI support.
  static bool classof(const Job *job) {
    return job->Flags.getKind() == JobKind::DefaultActorSeparate;
  }
};
/// A job to process a default actor with a new priority; allocated
/// separately from the actor.
class ProcessOverrideJob;
/// Information about the currently-running processing job.
struct RunningJobInfo {
  enum KindType : uint8_t {
    Inline, Override, Other
  };

  /// Which flavor of processing job is running the actor.
  KindType Kind;
  /// The priority the processing job was scheduled at.
  JobPriority Priority;
  /// Set only for override jobs; the factory functions below leave it
  /// null for inline and other jobs.
  ProcessOverrideJob *OverrideJob;

  bool wasInlineJob() const {
    return Kind == Inline;
  }

  static RunningJobInfo forOther(JobPriority priority) {
    return {Other, priority, nullptr};
  }
  static RunningJobInfo forInline(JobPriority priority) {
    return {Inline, priority, nullptr};
  }
  static RunningJobInfo forOverride(ProcessOverrideJob *job);

  /// Resolution methods; see the out-of-line definitions below for
  /// their semantics.
  void setAbandoned();
  void setRunning();
  bool waitForActivation();
};
class JobRef {
  enum : uintptr_t {
    NeedsPreprocessing = 0x1,
    IsOverride = 0x2,
    JobMask = ~uintptr_t(NeedsPreprocessing | IsOverride)
  };

  /// A Job* that may have one of the two bits above mangled into it.
  uintptr_t Value;

  JobRef(Job *job, unsigned flags)
    : Value(reinterpret_cast<uintptr_t>(job) | flags) {}
public:
  constexpr JobRef() : Value(0) {}

  /// Return a reference to a job that's been properly preprocessed.
  static JobRef getPreprocessed(Job *job) {
    /// We allow null pointers here.
    return { job, 0 };
  }

  /// Return a reference to a job that hasn't been preprocessed yet.
  static JobRef getUnpreprocessed(Job *job) {
    assert(job && "passing a null job");
    return { job, NeedsPreprocessing };
  }

  /// Return a reference to an override job, which needs special
  /// treatment during preprocessing.
  static JobRef getOverride(ProcessOverrideJob *job);

  /// Is this a null reference?
  operator bool() const { return Value != 0; }

  /// Does this job need to be pre-processed before we can treat
  /// the job queue as a proper queue?
  bool needsPreprocessing() const {
    return Value & NeedsPreprocessing;
  }

  /// Is this an unpreprocessed override job?
  bool isOverride() const {
    return Value & IsOverride;
  }

  /// Given that this is an override job, return it.
  ProcessOverrideJob *getAsOverride() const {
    assert(isOverride());
    return reinterpret_cast<ProcessOverrideJob*>(Value & JobMask);
  }
  /// Return this as an override job if it is one (already
  /// preprocessed), else null.
  ProcessOverrideJob *getAsPreprocessedOverride() const;

  Job *getAsJob() const {
    assert(!isOverride());
    return reinterpret_cast<Job*>(Value & JobMask);
  }
  Job *getAsPreprocessedJob() const {
    assert(!isOverride() && !needsPreprocessing());
    return reinterpret_cast<Job*>(Value);
  }

  bool operator==(JobRef other) const {
    return Value == other.Value;
  }
  bool operator!=(JobRef other) const {
    return Value != other.Value;
  }
};
/// The default actor implementation.
///
/// Ownership of the actor is subtle. Jobs are assumed to keep the actor
/// alive as long as they're executing on it; this allows us to avoid
/// retaining and releasing whenever threads are scheduled to run a job.
/// While jobs are enqueued on the actor, there is a conceptual shared
/// ownership of the currently-enqueued jobs which is passed around
/// between threads and processing jobs and managed using extra retains
/// and releases of the actor. The basic invariant is as follows:
///
/// - Let R be 1 if there are jobs enqueued on the actor or if a job
/// is currently running on the actor; otherwise let R be 0.
/// - Let N be the number of active processing jobs for the actor.
/// - N >= R
/// - There are N - R extra retains of the actor.
///
/// We can think of this as there being one "owning" processing job
/// and K "extra" jobs. If there is a processing job that is actively
/// running the actor, it is always the owning job; otherwise, any of
/// the N jobs may win the race to become the owning job.
///
/// We then have the following ownership rules:
///
/// - When we enqueue the first job on an actor, then R becomes 1, and
/// we must create a processing job so that N >= R. We do not need to
/// retain the actor.
/// - When we create an extra job to process an actor (e.g. because of
/// priority overrides), N increases but R remains the same. We must
/// retain the actor.
/// - When we start running an actor, our job definitively becomes the
/// owning job, but neither N nor R changes. We do not need to retain
/// the actor.
/// - When we go to start running an actor and for whatever reason we
/// don't actually do so, we are eliminating an extra processing job,
/// and so N decreases but R remains the same. We must release the
/// actor.
/// - When we are running an actor and give it up, and there are no
/// remaining jobs on it, then R becomes 0 and N decreases by 1.
/// We do not need to release the actor.
/// - When we are running an actor and give it up, and there are jobs
/// remaining on it, then R remains 1 but N is decreasing by 1.
/// We must either release the actor or create a new processing job
/// for it to maintain the balance.
class DefaultActorImpl : public HeapObject {
  enum class Status {
    /// The actor is not currently scheduled. Completely redundant
    /// with the job list being empty.
    Idle,

    /// There is currently a job scheduled to process the actor at the
    /// stored max priority.
    Scheduled,

    /// There is currently a thread processing the actor at the stored
    /// max priority.
    Running
  };

  struct Flags : public FlagSet<size_t> {
    enum : size_t {
      Status_offset = 0,
      Status_width = 2,

      HasActiveInlineJob = 2,

      MaxPriority = 8,
      MaxPriority_width = JobFlags::Priority_width,

      // FIXME: add a reference to the running thread ID so that we
      // can boost priorities.
    };

    /// What is the current high-level status of this actor?
    FLAGSET_DEFINE_FIELD_ACCESSORS(Status_offset, Status_width, Status,
                                   getStatus, setStatus)

    /// Is there currently an active processing job allocated inline
    /// in the actor?
    FLAGSET_DEFINE_FLAG_ACCESSORS(HasActiveInlineJob,
                                  hasActiveInlineJob, setHasActiveInlineJob)

    /// What is the maximum priority of jobs that are currently running
    /// or enqueued on this actor?
    ///
    /// Note that the above isn't quite correct: we don't actually
    /// lower this after we finish processing higher-priority tasks.
    /// (Doing so introduces some subtleties around kicking off
    /// lower-priority processing jobs.)
    FLAGSET_DEFINE_FIELD_ACCESSORS(MaxPriority, MaxPriority_width,
                                   JobPriority,
                                   getMaxPriority, setMaxPriority)
  };

  /// This is designed to fit into two words, which can generally be
  /// done lock-free on all our supported platforms.
  struct alignas(2 * sizeof(void*)) State {
    JobRef FirstJob;
    struct Flags Flags;
  };

  /// The atomically-updated scheduling state of the actor.
  swift::atomic<State> CurrentState;

  friend class ProcessInlineJob;
  union {
    /// Storage for the inline processing job; see
    /// scheduleNonOverrideProcessJob for how it is claimed.
    ProcessInlineJob JobStorage;
  };

public:
  /// Properly construct an actor, except for the heap header.
  void initialize() {
    new (&CurrentState) std::atomic<State>(State{JobRef(), Flags()});
  }

  /// Properly destruct an actor, except for the heap header.
  void destroy() {
    assert(CurrentState.load(std::memory_order_relaxed).Flags.getStatus()
             == Status::Idle && "actor not idle during destruction?");
  }

  /// Add a job to this actor.
  void enqueue(Job *job);

  /// Take over running this actor in the current thread, if possible.
  bool tryAssumeThread(RunningJobInfo runner);

  /// Give up running this actor in the current thread.
  void giveUpThread(RunningJobInfo runner);

  /// Claim the next job off the actor or give it up.
  Job *claimNextJobOrGiveUp(bool actorIsOwned, RunningJobInfo runner);

private:
  /// Schedule an inline processing job. This can generally only be
  /// done if we know nobody else is trying to do it at the same time,
  /// e.g. if this thread just successfully transitioned the actor from
  /// Idle to Scheduled.
  void scheduleNonOverrideProcessJob(JobPriority priority,
                                     bool hasActiveInlineJob);

  /// Recover the actor from the processing job allocated inline in it.
  static DefaultActorImpl *fromInlineJob(Job *job) {
    assert(isa<ProcessInlineJob>(job));
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
    return reinterpret_cast<DefaultActorImpl*>(
      reinterpret_cast<char*>(job) - offsetof(DefaultActorImpl, JobStorage));
#pragma clang diagnostic pop
  }

  /// A helper that lazily creates an override job while a state
  /// transition is being retried, then either schedules or destroys it
  /// in commit().
  class OverrideJobCache {
    ProcessOverrideJob *Job = nullptr;
    bool IsNeeded = false;
#ifndef NDEBUG
    bool WasCommitted = false;
#endif
  public:
    OverrideJobCache() = default;
    OverrideJobCache(const OverrideJobCache &) = delete;
    OverrideJobCache &operator=(const OverrideJobCache &) = delete;
    ~OverrideJobCache() {
      assert(WasCommitted && "didn't commit override job!");
    }

    void addToState(DefaultActorImpl *actor, State &newState);
    void setNotNeeded() { IsNeeded = false; }
    void commit();
  };
};
} /// end anonymous namespace
static_assert(sizeof(DefaultActorImpl) <= sizeof(DefaultActor) &&
alignof(DefaultActorImpl) <= alignof(DefaultActor),
"DefaultActorImpl doesn't fit in DefaultActor");
static DefaultActorImpl *asImpl(DefaultActor *actor) {
return reinterpret_cast<DefaultActorImpl*>(actor);
}
static DefaultActor *asAbstract(DefaultActorImpl *actor) {
return reinterpret_cast<DefaultActor*>(actor);
}
/*****************************************************************************/
/************************** DEFAULT ACTOR TRACKING ***************************/
/*****************************************************************************/
namespace {
enum Mode {
/// Shadow any existing frame, leaving it untouched.
ShadowExistingFrame,
/// Update any existing frame if possible.
UpdateExistingFrame
};
/// A little class for tracking whether there's a frame processing
/// default actors in the current thread.
///
/// The goal of this class is to encapsulate uses of the central variable.
/// We want to potentially use a more efficient access pattern than
/// ordinary thread-locals when that's available.
class DefaultActorProcessingFrame {
  using ValueType = llvm::PointerIntPair<DefaultActorImpl*, 1, bool>;

  /// The active default actor on the current thread, if any.
  /// This may still need to be tracked separately from the active
  /// executor, if/when we start tracking that in thread-local storage.
  static SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(ValueType, ThreadLocalValue);

  /// The value this frame replaced; restored by exit().
  ValueType SavedValue;
  /// See isNeeded() for the contract.
  bool IsNeeded;

public:
  /// Flag that this thread is processing the given actor (or null,
  /// for generic processing) and set up a processing frame if we
  /// don't already have one.
  DefaultActorProcessingFrame(DefaultActorImpl *actor, Mode mode) {
    // If we should shadow an existing frame, save any value that
    // it might have set.
    if (mode == ShadowExistingFrame) {
      SavedValue = ThreadLocalValue.get();
      IsNeeded = true;

    // If we should update an existing frame, just replace any value
    // that it might have set.
    } else {
      IsNeeded = !ThreadLocalValue.get().getInt();
      SavedValue = ValueType();
    }
    ThreadLocalValue.set(ValueType(actor, true));
  }

  DefaultActorProcessingFrame(const DefaultActorProcessingFrame &) = delete;
  DefaultActorProcessingFrame &operator=(
                                const DefaultActorProcessingFrame &) = delete;

  /// Return the currently active actor.
  DefaultActorImpl *getActiveActor() {
    return ThreadLocalValue.get().getPointer();
  }

  /// Exit the frame. This isn't a destructor intentionally, because
  /// we need to be able to tail-call out of frames that might have
  /// optimistically made one of these.
  void exit() {
    ThreadLocalValue.set(SavedValue);
  }

  /// Return whether this frame was needed; if it was not, then it's
  /// okay to abandon it without calling exit(). This is only meaningful
  /// when constructed in the UpdateExistingFrame mode.
  bool isNeeded() {
    return IsNeeded;
  }
};

/// Define the thread-local.
SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(
  DefaultActorProcessingFrame::ValueType,
  DefaultActorProcessingFrame::ThreadLocalValue);
} /// end anonymous namespace
/*****************************************************************************/
/*********************** DEFAULT ACTOR IMPLEMENTATION ************************/
/*****************************************************************************/
/// Given that a job is enqueued normally on a default actor, get/set
/// the next job in the actor's queue.
///
/// Note that this must not be used on the override jobs that can appear
/// in the queue; those jobs are not actually in the actor's queue (they're
/// on the global execution queues). So the actor's actual queue flows
/// through the NextJob field on those objects rather than through
/// the SchedulerPrivate fields.
/// Read the queue link stored in the job's scheduler-private data.
static JobRef getNextJobInQueue(Job *job) {
  return *reinterpret_cast<JobRef*>(job->SchedulerPrivate);
}
/// Write the queue link into the job's scheduler-private data.
static void setNextJobInQueue(Job *job, JobRef next) {
  *reinterpret_cast<JobRef*>(job->SchedulerPrivate) = next;
}
/// Schedule a processing job that doesn't have to be an override job.
///
/// We can either do this with inline storage or heap-allocated.
/// To ues inline storage, we need to verify that the hasActiveInlineJob
/// flag is not set in the state and then successfully set it. The
/// argument reports that this has happened correctly.
///
/// We should only schedule a non-override processing job at all if
/// we're transferring ownership of the jobs in it; see the ownership
/// comment on DefaultActorImpl.
void DefaultActorImpl::scheduleNonOverrideProcessJob(JobPriority priority,
                                                     bool hasActiveInlineJob) {
  Job *job;
  if (hasActiveInlineJob) {
    // The inline storage is already in use; fall back to a
    // heap-allocated processing job.
    job = new ProcessOutOfLineJob(this, priority);
  } else {
    // Construct the processing job in the actor's inline storage.
    job = new (&JobStorage) ProcessInlineJob(priority);
  }
  swift_task_enqueueGlobal(job);
}
namespace {
/// A job to process a specific default actor at a higher priority than
/// it was previously running at.
///
/// When an override job is successfully registered with an actor
/// (not enqueued there), the thread processing the actor and the
/// thread processing the override job coordinate by each calling
/// one of a set of methods on the object.
class ProcessOverrideJob : public Job {
  DefaultActorImpl *Actor;

  ConditionVariable::Mutex Lock;
  ConditionVariable Queue;

  // The job is deleted once *both* the actor and the job have resolved
  // it; each method below computes that condition under the lock and
  // deletes outside of it.

  /// Has the actor made a decision about this job yet?
  bool IsResolvedByActor = false;

  /// Has the job made a decision about itself yet?
  bool IsResolvedByJob = false;

  /// Has this job been abandoned?
  bool IsAbandoned = false;

public:
  /// SchedulerPrivate in an override job is used for actually scheduling
  /// the job, so the actor queue goes through this instead.
  ///
  /// We also use this temporarily for the list of override jobs on
  /// the actor that we need to wake up.
  JobRef NextJob;

public:
  ProcessOverrideJob(DefaultActorImpl *actor, JobPriority priority,
                     JobRef nextJob)
    : Job({JobKind::DefaultActorOverride, priority}, &process),
      Actor(actor), NextJob(nextJob) {}

  DefaultActorImpl *getActor() const { return Actor; }

  /// Called by the job to notify the actor that the job has chosen
  /// to abandon its work. This is irrevocable: the job is not going
  /// to have a thread behind it.
  ///
  /// This may delete the job or cause it to be deleted on another thread.
  void setAbandoned() {
    bool shouldDelete = false;
    Lock.withLock([&] {
      assert(!IsResolvedByJob && "job already resolved itself");
      IsResolvedByJob = true;
      IsAbandoned = true;
      shouldDelete = IsResolvedByJob && IsResolvedByActor;
    });
    if (shouldDelete) delete this;
  }

  /// Called by the job to notify the actor that the job has successfully
  /// taken over the actor and is now running it.
  ///
  /// This may delete the job object or cause it to be deleted on
  /// another thread.
  void setRunning() {
    bool shouldDelete = false;
    Lock.withLock([&] {
      assert(!IsResolvedByJob && "job already resolved itself");
      IsResolvedByJob = true;
      shouldDelete = IsResolvedByJob && IsResolvedByActor;
    });
    if (shouldDelete) delete this;
  }

  /// Called by the job to wait for the actor to resolve what the job
  /// should do. Always deletes the job before returning.
  bool waitForActivation() {
    bool isActivated = false;
    Lock.withLockOrWait(Queue, [&] {
      assert(!IsResolvedByJob && "job already resolved itself");
      if (IsResolvedByActor) {
        isActivated = !IsAbandoned;
        IsResolvedByJob = true;
        return true;
      }
      return false;
    });
    delete this;
    return isActivated;
  }

  /// Called by the actor to notify this job that the actor thinks it
  /// should try to take over the actor. It's okay if that doesn't
  /// succeed (as long as that's because some other job is going to
  /// take over).
  ///
  /// This may delete the job or cause it to be deleted on another
  /// thread.
  bool wakeAndActivate() {
    bool shouldDelete = false;
    bool mayHaveBeenActivated = false;
    Lock.withLockThenNotifyAll(Queue, [&] {
      assert(!IsResolvedByActor && "actor already resolved this sjob");
      IsResolvedByActor = true;
      mayHaveBeenActivated = IsResolvedByJob && !IsAbandoned;
      shouldDelete = IsResolvedByJob && IsResolvedByActor;
    });
    if (shouldDelete) delete this;
    return mayHaveBeenActivated;
  }

  /// Called by the actor to notify this job that the actor does not
  /// think it should try to take over the actor. It's okay if the
  /// job successfully takes over the actor anyway.
  ///
  /// This may delete the job or cause it to be deleted on another
  /// thread.
  void wakeAndAbandon() {
    bool shouldDelete = false;
    Lock.withLockThenNotifyAll(Queue, [&] {
      assert(!IsResolvedByActor && "actor already resolved this job");
      IsResolvedByActor = true;
      IsAbandoned = true;
      shouldDelete = IsResolvedByJob && IsResolvedByActor;
    });
    if (shouldDelete) delete this;
  }

  /// Executor entry point for this job kind.
  SWIFT_CC(swiftasync)
  static void process(Job *job, ExecutorRef _executor);

  /// LLVM-style RTTI support.
  static bool classof(const Job *job) {
    return job->Flags.getKind() == JobKind::DefaultActorOverride;
  }
};
} /// end anonymous namespace
// These are defined out of line because they need ProcessOverrideJob
// to be a complete type.
JobRef JobRef::getOverride(ProcessOverrideJob *job) {
  return JobRef(job, NeedsPreprocessing | IsOverride);
}
ProcessOverrideJob *JobRef::getAsPreprocessedOverride() const {
  return cast_or_null<ProcessOverrideJob>(getAsPreprocessedJob());
}
RunningJobInfo RunningJobInfo::forOverride(ProcessOverrideJob *job) {
  return {Override, job->getPriority(), job};
}
/// Flag that the current processing job has been abandoned
/// and will not be running the actor.
void RunningJobInfo::setAbandoned() {
  if (OverrideJob) {
    OverrideJob->setAbandoned();
    OverrideJob = nullptr;
  }
}

/// Flag that the current processing job is now running the actor.
void RunningJobInfo::setRunning() {
  if (OverrideJob) {
    OverrideJob->setRunning();
    OverrideJob = nullptr;
  }
}

/// Try to wait for the current processing job to be activated,
/// if that's possible. It's okay to call this multiple times
/// (or to call setAbandoned/setRunning after it) as long as
/// it's all on a single value.
bool RunningJobInfo::waitForActivation() {
  if (Kind == Override) {
    // If we don't have an override job, it's because we've already
    // waited for activation successfully.
    if (!OverrideJob) return true;

    bool result = OverrideJob->waitForActivation();
    // waitForActivation() deletes the job, so drop our reference.
    OverrideJob = nullptr;
    return result;
  }
  return false;
}
/// Wake all the overrides in the given list, activating the first
/// that exactly matches the target priority, if any.
static void wakeOverrides(ProcessOverrideJob *nextOverride,
                          Optional<JobPriority> targetPriority) {
  bool hasAlreadyActivated = false;
  while (nextOverride) {
    // We have to advance to the next override before we call one of
    // the wake methods because they can delete the job immediately
    // (and even if they don't, we'd still be racing with deletion).
    auto cur = nextOverride;
    nextOverride = cur->NextJob.getAsPreprocessedOverride();

    // At most one override is activated: the first whose priority
    // matches the target exactly.
    if (hasAlreadyActivated ||
        !targetPriority ||
        cur->getPriority() != *targetPriority)
      cur->wakeAndAbandon();
    else
      hasAlreadyActivated = cur->wakeAndActivate();
  }
}
/// Flag that an override job is needed and create it.
void DefaultActorImpl::OverrideJobCache::addToState(DefaultActorImpl *actor,
                                                    State &newState) {
  IsNeeded = true;
  auto newPriority = newState.Flags.getMaxPriority();
  auto nextJob = newState.FirstJob;
  if (Job) {
    // A job was already created by an earlier call; just update it for
    // the state we are now trying to install.
    Job->Flags.setPriority(newPriority);
    Job->NextJob = nextJob;
  } else {
    // Override jobs are always "extra" from the perspective of our
    // ownership rules and so require a retain of the actor. We must
    // do this before changing the actor state because other jobs may
    // race to release the actor as soon as we change the actor state.
    swift_retain(actor);
    Job = new ProcessOverrideJob(actor, newPriority, nextJob);
  }
  newState.FirstJob = JobRef::getOverride(Job);
}
/// Schedule the override job if we created one and still need it.
/// If we created one but didn't end up needing it (which can happen
/// with a race to override), destroy it.
void DefaultActorImpl::OverrideJobCache::commit() {
#ifndef NDEBUG
  assert(!WasCommitted && "committing override job multiple timee");
  WasCommitted = true;
#endif

  if (Job) {
    if (IsNeeded) {
      swift_task_enqueueGlobal(Job);
    } else {
      // Balance the retain taken in addToState before destroying the job.
      swift_release(Job->getActor());
      delete Job;
    }
  }
}
/// Preprocess the prefix of the actor's queue that hasn't already
/// been preprocessed:
///
/// - Split the jobs into registered overrides and actual jobs.
/// - Append the actual jobs to any already-preprocessed job list.
///
/// The returned job should become the new root of the job queue
/// (or may be immediately dequeued, in which its successor should).
/// All of the jobs in this list are guaranteed to be non-override jobs.
static Job *preprocessQueue(JobRef first,
                            JobRef previousFirst,
                            Job *previousFirstNewJob,
                            ProcessOverrideJob *&overridesToWake) {
  assert(previousFirst || previousFirstNewJob == nullptr);

  // Fast path: the head of the queue is already preprocessed, so the
  // whole queue is.
  if (!first.needsPreprocessing())
    return first.getAsPreprocessedJob();

  Job *firstNewJob = nullptr;

  while (first != previousFirst) {
    // If we find something that doesn't need preprocessing, it must've
    // been left by a previous queue-processing, which means that
    // this must be our first attempt to preprocess in this processing.
    // Just treat the queue from this point as a well-formed whole
    // to which we need to add any new items we might've just found.
    if (!first.needsPreprocessing()) {
      assert(!previousFirst && !previousFirstNewJob);
      previousFirstNewJob = first.getAsPreprocessedJob();
      break;
    }

    // If the job is an override, add it to the list of override jobs
    // that we need to wake up. Note that the list of override jobs
    // flows through NextJob; we must not use getNextJobInQueue because
    // that touches queue-private state, and the override job is
    // not enqueued on the actor, merely registered with it.
    if (first.isOverride()) {
      auto overrideJob = first.getAsOverride();
      first = overrideJob->NextJob;
      overrideJob->NextJob = JobRef::getPreprocessed(overridesToWake);
      overridesToWake = overrideJob;
      continue;
    }

    // If the job isn't an override, add it to the front of the list of
    // jobs we're building up. Note that this reverses the order of
    // jobs; since enqueue() always adds jobs to the front, reversing
    // the order effectively makes the actor queue FIFO, which is what
    // we want.
    // FIXME: but we should also sort by priority
    auto job = first.getAsJob();
    first = getNextJobInQueue(job);
    setNextJobInQueue(job, JobRef::getPreprocessed(firstNewJob));
    firstNewJob = job;
  }

  // If there are jobs already in the queue, put the new jobs at the end.
  if (!firstNewJob) {
    firstNewJob = previousFirstNewJob;
  } else if (previousFirstNewJob) {
    // Walk to the tail of the previously-preprocessed list and splice
    // the newly-reversed jobs onto it.
    auto cur = previousFirstNewJob;
    while (true) {
      auto next = getNextJobInQueue(cur).getAsPreprocessedJob();
      if (!next) {
        setNextJobInQueue(cur, JobRef::getPreprocessed(firstNewJob));
        break;
      }
      cur = next;
    }
    firstNewJob = previousFirstNewJob;
  }

  return firstNewJob;
}
/// Give up this thread's ownership of the actor: atomically publish any
/// queue preprocessing we've done, wake any pending override jobs, and
/// make sure a thread will eventually process any remaining work.
///
/// The actor must currently be in the Running state.
void DefaultActorImpl::giveUpThread(RunningJobInfo runner) {
  auto oldState = CurrentState.load(std::memory_order_acquire);
  assert(oldState.Flags.getStatus() == Status::Running);
  ProcessOverrideJob *overridesToWake = nullptr;
  auto firstNewJob = preprocessQueue(oldState.FirstJob, JobRef(), nullptr,
                                     overridesToWake);
  while (true) {
    // Build the state we'd like to transition to: queue preprocessed,
    // and status Scheduled (work remains) or Idle (queue drained).
    State newState = oldState;
    newState.FirstJob = JobRef::getPreprocessed(firstNewJob);
    if (firstNewJob) {
      newState.Flags.setStatus(Status::Scheduled);
    } else {
      newState.Flags.setStatus(Status::Idle);
    }
    // If the runner was an inline job, it's no longer active.
    if (runner.wasInlineJob()) {
      newState.Flags.setHasActiveInlineJob(false);
    }
    bool hasMoreJobs = (bool) newState.FirstJob;
    bool hasOverrideAtNewPriority =
      (runner.Priority < oldState.Flags.getMaxPriority());
    bool hasActiveInlineJob = newState.Flags.hasActiveInlineJob();
    bool needsNewProcessJob = hasMoreJobs && !hasOverrideAtNewPriority;
    // If we want to create a new inline job below, be sure to claim that
    // in the new state.
    if (needsNewProcessJob && !hasActiveInlineJob) {
      newState.Flags.setHasActiveInlineJob(true);
    }
    auto firstPreprocessed = oldState.FirstJob;
    if (!CurrentState.compare_exchange_weak(oldState, newState,
                    /*success*/ std::memory_order_release,
                    /*failure*/ std::memory_order_acquire)) {
      // Preprocess any new queue items.
      firstNewJob = preprocessQueue(oldState.FirstJob,
                                    firstPreprocessed,
                                    firstNewJob,
                                    overridesToWake);
      // Try again.
      continue;
    }
    // The priority of the remaining work.
    auto newPriority = newState.Flags.getMaxPriority();
    // Wake any overrides.
    wakeOverrides(overridesToWake, newPriority);
    // This is the actor's owning job; per the ownership rules (see
    // the comment on DefaultActorImpl), if there are remaining
    // jobs, we need to balance out our ownership one way or another.
    // We also, of course, need to ensure that there's a thread that's
    // actually going to process the actor.
    if (hasMoreJobs) {
      // If we know that there's an override job at the new priority,
      // we can let it become the owning job. We just need to release.
      if (hasOverrideAtNewPriority) {
        swift_release(this);
      // Otherwise, enqueue a job that will try to take over running
      // with the new priority. This also ensures that there's a job
      // at that priority which will actually take over the actor.
      } else {
        scheduleNonOverrideProcessJob(newPriority, hasActiveInlineJob);
      }
    }
    return;
  }
}
/// Claim the next job on the actor or give it up forever.
///
/// The running thread doesn't need to already own the actor to do this.
/// It does need to be participating correctly in the ownership
/// scheme as a "processing job"; see the comment on DefaultActorImpl.
///
/// Returns the claimed job, or null if the thread gave up the actor
/// (out of work, wrong priority, or lost the activation race).
Job *DefaultActorImpl::claimNextJobOrGiveUp(bool actorIsOwned,
                                            RunningJobInfo runner) {
  auto oldState = CurrentState.load(std::memory_order_acquire);
  // The status had better be Running unless we're trying to acquire
  // our first job.
  assert(oldState.Flags.getStatus() == Status::Running ||
         !actorIsOwned);
  // If we don't yet own the actor, we need to try to claim the actor
  // first; we cannot safely access the queue memory yet because other
  // threads may concurrently be trying to do this.
  if (!actorIsOwned) {
    while (true) {
      // A helper function when the only change we need to try is to
      // update for an inline runner.
      auto tryUpdateForInlineRunner = [&]{
        if (!runner.wasInlineJob()) return true;
        auto newState = oldState;
        newState.Flags.setHasActiveInlineJob(false);
        return CurrentState.compare_exchange_weak(oldState, newState,
                        /*success*/ std::memory_order_relaxed,
                        /*failure*/ std::memory_order_acquire);
      };
      // If the actor is out of work, or its priority doesn't match our
      // priority, don't try to take over the actor.
      if (!oldState.FirstJob ||
          oldState.Flags.getMaxPriority() != runner.Priority) {
        // The only change we need here is inline-runner bookkeeping.
        if (!tryUpdateForInlineRunner())
          continue;
        // We're eliminating a processing thread; balance ownership.
        swift_release(this);
        runner.setAbandoned();
        return nullptr;
      }
      // If the actor is currently running, we'd need to wait for
      // it to stop. We can do this if we're an override job;
      // otherwise we need to exit.
      if (oldState.Flags.getStatus() == Status::Running) {
        if (!runner.waitForActivation()) {
          // The only change we need here is inline-runner bookkeeping.
          if (!tryUpdateForInlineRunner())
            continue;
          swift_release(this);
          return nullptr;
        }
        // Fall through into the compare-exchange below, but anticipate
        // that the actor is now Scheduled instead of Running.
        oldState.Flags.setStatus(Status::Scheduled);
      }
      // Try to set the state as Running.
      assert(oldState.Flags.getStatus() == Status::Scheduled);
      auto newState = oldState;
      newState.Flags.setStatus(Status::Running);
      // Also do our inline-runner bookkeeping.
      if (runner.wasInlineJob())
        newState.Flags.setHasActiveInlineJob(false);
      if (!CurrentState.compare_exchange_weak(oldState, newState,
                      /*success*/ std::memory_order_relaxed,
                      /*failure*/ std::memory_order_acquire))
        continue;
      // If that succeeded, we can proceed to the main body.
      oldState = newState;
      runner.setRunning();
      break;
    }
  }
  assert(oldState.Flags.getStatus() == Status::Running);
  // We should have taken care of the inline-job bookkeeping now.
  assert(!oldState.Flags.hasActiveInlineJob() || !runner.wasInlineJob());
  // Okay, now it's safe to look at queue state.
  // Preprocess any queue items at the front of the queue.
  ProcessOverrideJob *overridesToWake = nullptr;
  auto newFirstJob = preprocessQueue(oldState.FirstJob, JobRef(),
                                     nullptr, overridesToWake);
  while (true) {
    State newState = oldState;
    // If the priority we're currently running with is adequate for
    // all the remaining jobs, try to dequeue something.
    // FIXME: should this be an exact match in priority instead of
    // potentially running jobs with too high a priority?
    Job *jobToRun;
    if (oldState.Flags.getMaxPriority() <= runner.Priority &&
        newFirstJob) {
      jobToRun = newFirstJob;
      newState.FirstJob = getNextJobInQueue(newFirstJob);
      newState.Flags.setStatus(Status::Running);
    // Otherwise, we should give up the thread.
    } else {
      jobToRun = nullptr;
      newState.FirstJob = JobRef::getPreprocessed(newFirstJob);
      newState.Flags.setStatus(newFirstJob ? Status::Scheduled
                                           : Status::Idle);
    }
    // Try to update the queue. The changes we've made to the queue
    // structure need to be made visible even if we aren't dequeuing
    // anything.
    auto firstPreprocessed = oldState.FirstJob;
    if (!CurrentState.compare_exchange_weak(oldState, newState,
                     /*success*/ std::memory_order_release,
                     /*failure*/ std::memory_order_acquire)) {
      // Preprocess any new queue items, which will have been formed
      // into a linked list leading to the last head we observed.
      // (The fact that that job may not be the head anymore doesn't
      // matter; we're looking for an exact match with that.)
      newFirstJob = preprocessQueue(oldState.FirstJob,
                                    firstPreprocessed,
                                    newFirstJob,
                                    overridesToWake);
      // Loop to retry updating the state.
      continue;
    }
    // We successfully updated the state.
    // If we're giving up the thread with jobs remaining, we need
    // to release the actor, and we should wake overrides with the
    // right priority.
    Optional<JobPriority> remainingJobPriority;
    if (!jobToRun && newFirstJob) {
      remainingJobPriority = newState.Flags.getMaxPriority();
    }
    // Wake the overrides.
    wakeOverrides(overridesToWake, remainingJobPriority);
    // Per the ownership rules (see the comment on DefaultActorImpl),
    // release the actor if we're giving up the thread with jobs
    // remaining. We intentionally do this after wakeOverrides to
    // try to get the overrides running a little faster.
    if (remainingJobPriority)
      swift_release(this);
    return jobToRun;
  }
}
/// The primary function for processing an actor on a thread. Start
/// processing the given default actor as the active default actor on
/// the current thread, and keep processing whatever actor we're
/// running when code returns back to us until we're not processing
/// any actors anymore.
static void processDefaultActor(DefaultActorImpl *currentActor,
                                RunningJobInfo runner) {
  // Register that we're processing a default actor in this frame.
  DefaultActorProcessingFrame frame(currentActor, ShadowExistingFrame);
  // Whether this thread has established ownership of currentActor yet;
  // stays false until we've successfully claimed our first job.
  bool threadIsRunningActor = false;
  while (true) {
    assert(currentActor);
    // Immediately check if we've been asked to yield the thread.
    if (shouldYieldThread())
      break;
    // Claim another job from the current actor.
    auto job = currentActor->claimNextJobOrGiveUp(threadIsRunningActor,
                                                  runner);
    // If we failed to claim a job, we have nothing to do.
    if (!job) {
      // We also gave up the actor as part of failing to claim it.
      // Make sure we don't try to give up the actor again.
      currentActor = nullptr;
      break;
    }
    // Run the job.
    job->run(ExecutorRef::forDefaultActor(asAbstract(currentActor)));
    // The current actor may have changed after the job.
    // If it's become nil, we have nothing to do.
    currentActor = frame.getActiveActor();
    if (!currentActor)
      break;
    // Otherwise, we know that we're running the actor on this thread.
    threadIsRunningActor = true;
  }
  frame.exit();
  // If we still have an active actor, we should give it up.
  if (currentActor)
    currentActor->giveUpThread(runner);
}
/// Entry point for the actor's invasive (inline) processing job.
void ProcessInlineJob::process(Job *job, ExecutorRef _executor) {
  // The inline job lives inside the actor itself; recover the actor.
  DefaultActorImpl *actor = DefaultActorImpl::fromInlineJob(job);
  // Pull the priority out of the job before we do anything that might
  // invalidate it.
  auto targetPriority = job->getPriority();
  auto runner = RunningJobInfo::forInline(targetPriority);
  // FIXME: force tail call
  return processDefaultActor(actor, runner);
}
/// Entry point for a heap-allocated, non-override processing job.
void ProcessOutOfLineJob::process(Job *job, ExecutorRef _executor) {
  auto self = cast<ProcessOutOfLineJob>(job);
  DefaultActorImpl *actor = self->Actor;
  // Pull the priority out of the job before we do anything that might
  // invalidate it.
  auto targetPriority = job->getPriority();
  auto runner = RunningJobInfo::forOther(targetPriority);
  // The job is destroyed here, before processing; everything we need
  // from it has been copied out above.
  delete self;
  // FIXME: force tail call
  return processDefaultActor(actor, runner);
}
/// Entry point for an override processing job.
void ProcessOverrideJob::process(Job *job, ExecutorRef _executor) {
  auto self = cast<ProcessOverrideJob>(job);
  // Pull the actor and priority out of the job.
  auto actor = self->Actor;
  // Note that, unlike ProcessOutOfLineJob, the job is not deleted here;
  // the RunningJobInfo keeps a pointer to it.
  auto runner = RunningJobInfo::forOverride(self);
  // FIXME: force tail call
  return processDefaultActor(actor, runner);
}
/// Add a job to this actor's queue, updating the actor's maximum
/// priority and scheduling a processing job if the actor was idle.
void DefaultActorImpl::enqueue(Job *job) {
  auto oldState = CurrentState.load(std::memory_order_relaxed);
  OverrideJobCache overrideJob;
  while (true) {
    auto newState = oldState;
    // Put the job at the front of the job list (which will get
    // reversed during preprocessing).
    setNextJobInQueue(job, oldState.FirstJob);
    newState.FirstJob = JobRef::getUnpreprocessed(job);
    auto oldStatus = oldState.Flags.getStatus();
    bool wasIdle = oldStatus == Status::Idle;
    // Update the priority: the priority of the job we're adding
    // if the actor was idle, or the max if not. Only the running
    // thread can decrease the actor's priority once it's non-idle.
    // (But note that the job we enqueue can still observe a
    // lowered priority.)
    auto oldPriority = oldState.Flags.getMaxPriority();
    auto newPriority =
      wasIdle ? job->getPriority()
              : std::max(oldPriority, job->getPriority());
    newState.Flags.setMaxPriority(newPriority);
    // If we need an override job, create it (if necessary) and
    // register it with the queue.
    bool needsOverride = !wasIdle && newPriority != oldPriority;
    if (needsOverride) {
      overrideJob.addToState(this, newState);
    } else {
      overrideJob.setNotNeeded();
    }
    // If we don't need an override job, then we might be able to
    // create an inline job; flag that.
    bool hasActiveInlineJob = newState.Flags.hasActiveInlineJob();
    if (wasIdle && !hasActiveInlineJob)
      newState.Flags.setHasActiveInlineJob(true);
    // Make sure the status is at least Scheduled. We'll actually
    // schedule the job below, if we succeed at this.
    if (wasIdle) {
      newState.Flags.setStatus(Status::Scheduled);
    }
    // Try the compare-exchange, and try again if it fails.
    if (!CurrentState.compare_exchange_weak(oldState, newState,
          /*success*/ std::memory_order_release,
          /*failure*/ std::memory_order_relaxed))
      continue;
    // Okay, we successfully updated the status. Schedule a job to
    // process the actor if necessary.
    // Commit the override job if we created one.
    overrideJob.commit();
    // If the actor is currently idle, schedule it using the
    // invasive job.
    if (wasIdle) {
      assert(!needsOverride);
      scheduleNonOverrideProcessJob(newPriority, hasActiveInlineJob);
    }
    return;
  }
}
/// Try to transition this actor from Idle directly to Running on the
/// current thread. Returns false if the actor is not idle.
bool DefaultActorImpl::tryAssumeThread(RunningJobInfo runner) {
  // We have to load-acquire in order to properly order accesses to
  // the actor's state for the new task.
  auto oldState = CurrentState.load(std::memory_order_acquire);
  // If the actor is currently idle, try to mark it as running.
  while (oldState.Flags.getStatus() == Status::Idle) {
    assert(!oldState.FirstJob);
    auto newState = oldState;
    newState.Flags.setStatus(Status::Running);
    newState.Flags.setMaxPriority(runner.Priority);
    if (CurrentState.compare_exchange_weak(oldState, newState,
                    /*success*/ std::memory_order_relaxed,
                    /*failure*/ std::memory_order_acquire))
      return true;
  }
  return false;
}
/// Runtime entry point: initialize a default actor's runtime state.
void swift::swift_defaultActor_initialize(DefaultActor *_actor) {
  asImpl(_actor)->initialize();
}
/// Runtime entry point: destroy a default actor's runtime state.
void swift::swift_defaultActor_destroy(DefaultActor *_actor) {
  asImpl(_actor)->destroy();
}
/// Runtime entry point: enqueue a job directly on a default actor.
void swift::swift_defaultActor_enqueue(Job *job, DefaultActor *_actor) {
  asImpl(_actor)->enqueue(job);
}
/*****************************************************************************/
/****************************** ACTOR SWITCHING ******************************/
/*****************************************************************************/
/// Can the current executor give up its thread?
///
/// Both generic executors and default actors know how to hand their
/// thread over when switching, so either kind qualifies; anything
/// else cannot give up its thread.
static bool canGiveUpThreadForSwitch(ExecutorRef currentExecutor) {
  return currentExecutor.isGeneric() || currentExecutor.isDefaultActor();
}
/// Tell the current executor to give up its thread, given that it
/// returned true from canGiveUpThreadForSwitch().
///
/// Note that we don't update DefaultActorProcessingFrame here; we'll
/// do that in runOnAssumedThread.
static void giveUpThreadForSwitch(ExecutorRef currentExecutor,
                                  RunningJobInfo runner) {
  // A generic executor holds no thread, so there's nothing to give up.
  if (currentExecutor.isGeneric())
    return;
  // Otherwise this must be a default actor (see canGiveUpThreadForSwitch).
  asImpl(currentExecutor.getDefaultActor())->giveUpThread(runner);
}
/// Try to assume control of the current thread for the given executor
/// in order to run the given job.
///
/// This doesn't actually run the job yet.
///
/// Note that we don't update DefaultActorProcessingFrame here; we'll
/// do that in runOnAssumedThread.
static bool tryAssumeThreadForSwitch(ExecutorRef newExecutor,
                                     RunningJobInfo runner) {
  // A generic executor can always be "assumed": there's nothing to claim.
  if (newExecutor.isGeneric())
    return true;
  // A default actor must agree to hand us the thread; any other kind
  // of executor can't be assumed at all.
  return newExecutor.isDefaultActor() &&
         asImpl(newExecutor.getDefaultActor())->tryAssumeThread(runner);
}
/// Given that we've assumed control of an executor on this thread,
/// run the given task on it.
SWIFT_CC(swiftasync)
static void runOnAssumedThread(AsyncTask *task, ExecutorRef newExecutor,
                               RunningJobInfo runner) {
  assert(newExecutor.isGeneric() || newExecutor.isDefaultActor());
  // A generic executor has no associated actor; only default actors do.
  DefaultActorImpl *actor = newExecutor.isGeneric()
                              ? nullptr
                              : asImpl(newExecutor.getDefaultActor());
  // Set that this actor is now the active default actor on this thread,
  // and set up an actor-processing frame if there wasn't one already.
  DefaultActorProcessingFrame frame(actor, UpdateExistingFrame);
  // If one already existed, we should just tail-call the task; we don't
  // want these frames to potentially accumulate linearly.
  if (!frame.isNeeded()) {
    // FIXME: force tail call
    return task->run(newExecutor);
  }
  // Otherwise, run the new task.
  task->run(newExecutor);
  // Leave the processing frame, and give up the current actor if
  // we have one.
  //
  // In principle, we could execute more tasks here, but that's probably
  // not a reasonable thing to do in an assumed context rather than a
  // dedicated actor-processing job.
  actor = frame.getActiveActor();
  frame.exit();
  if (actor)
    actor->giveUpThread(runner);
}
/// Runtime entry point: move the given task from the current executor
/// to the new executor, either by taking over the current thread (when
/// both executors permit it) or by enqueuing the task asynchronously.
void swift::swift_task_switch(AsyncTask *task, ExecutorRef currentExecutor,
                              ExecutorRef newExecutor) {
  assert(task && "no task provided");
  // If the current executor is compatible with running the new executor,
  // just continue running.
  if (!currentExecutor.mustSwitchToRun(newExecutor)) {
    // FIXME: force tail call
    return task->run(currentExecutor);
  }
  // Okay, we semantically need to switch.
  auto runner = RunningJobInfo::forOther(task->getPriority());
  // If the current executor can give up its thread, and the new executor
  // can take over a thread, try to do so; but don't do this if we've
  // been asked to yield the thread.
  if (canGiveUpThreadForSwitch(currentExecutor) &&
      !shouldYieldThread() &&
      tryAssumeThreadForSwitch(newExecutor, runner)) {
    giveUpThreadForSwitch(currentExecutor, runner);
    // FIXME: force tail call
    return runOnAssumedThread(task, newExecutor, runner);
  }
  // Otherwise, just asynchronously enqueue the task on the given
  // executor.
  swift_task_enqueue(task, newExecutor);
}
/*****************************************************************************/
/************************* GENERIC ACTOR INTERFACES **************************/
/*****************************************************************************/
/// Runtime entry point: enqueue a job on the given executor,
/// dispatching to the global queue or to a default actor as appropriate.
void swift::swift_task_enqueue(Job *job, ExecutorRef executor) {
  assert(job && "no job provided");
  if (executor.isGeneric())
    return swift_task_enqueueGlobal(job);
  if (executor.isDefaultActor())
    return asImpl(executor.getDefaultActor())->enqueue(job);
  // Just assume it's actually a default actor that we haven't tagged
  // properly.
  // FIXME: call the general method.
  return asImpl(reinterpret_cast<DefaultActor*>(executor.getRawValue()))
           ->enqueue(job);
}
| jmgc/swift | stdlib/public/Concurrency/Actor.cpp | C++ | apache-2.0 | 47,851 |
var CaoTest = CaoTest || {};

// Shallow-clone: copy an object's own enumerable properties onto a new
// plain object. Nested objects/arrays are shared with the original,
// not copied.
//
// Fixes over the previous version: the parameter list was missing
// entirely (a syntax error), `objClone` leaked as an implicit global,
// and the code depended on an undefined `each` helper.
CaoTest.clone = function (obj) {
    var objClone = {};
    for (var key in obj) {
        // Guard against inherited properties (and against objects whose
        // hasOwnProperty has been shadowed) by calling the prototype's.
        if (Object.prototype.hasOwnProperty.call(obj, key)) {
            objClone[key] = obj[key];
        }
    }
    return objClone;
};
} | jcto/DBWeb | utils/clone.js | JavaScript | apache-2.0 | 255 |
<?php
/**
* This file is part of Notadd.
*
* @author TwilRoad <heshudong@ibenchu.com>
* @copyright (c) 2017, notadd.com
* @datetime 2017-03-01 15:29
*/
namespace Notadd\Foundation\Translation\Events;
/**
 * Class LocaleUpdated.
 *
 * Simple event object carrying the new locale value; by convention it
 * is dispatched when the application's locale setting changes.
 */
class LocaleUpdated
{
    /**
     * The new locale.
     *
     * @var string
     */
    public $locale;
    /**
     * Create a new event instance.
     *
     * @param string $locale the locale identifier being switched to
     */
    public function __construct($locale)
    {
        $this->locale = $locale;
    }
}
| notadd/framework | src/Translation/Events/LocaleUpdated.php | PHP | apache-2.0 | 523 |
package org.apache.ode.bpel.engine.fc.excp;
/**
 * Exception signalling a failure while composing process fragments.
 *
 * @author Alex Hummel
 */
public class FragmentCompositionException extends Exception {
    private static final long serialVersionUID = 9052461952290680611L;

    /** Creates an exception with no detail message or cause. */
    public FragmentCompositionException() {
        super();
    }

    /** Creates an exception with the given detail message. */
    public FragmentCompositionException(String message) {
        super(message);
    }

    /** Creates an exception wrapping the given cause. */
    public FragmentCompositionException(Exception e) {
        super(e);
    }

    /**
     * Creates an exception with both a detail message and a cause, so
     * callers can add context while preserving the original failure.
     * (Backward-compatible addition; existing constructors unchanged.)
     */
    public FragmentCompositionException(String message, Throwable cause) {
        super(message, cause);
    }
}
| TheRingbearer/HAWKS | ode/bpel-runtime/src/main/java/org/apache/ode/bpel/engine/fc/excp/FragmentCompositionException.java | Java | apache-2.0 | 418 |
/* Generic definitions */
/* Assertions (useful to generate conditional code) */
/* Current type and class (and size, if applicable) */
/* Value methods */
/* Interfaces (keys) */
/* Interfaces (values) */
/* Abstract implementations (keys) */
/* Abstract implementations (values) */
/* Static containers (keys) */
/* Static containers (values) */
/* Implementations */
/* Synchronized wrappers */
/* Unmodifiable wrappers */
/* Other wrappers */
/* Methods (keys) */
/* Methods (values) */
/* Methods (keys/values) */
/* Methods that have special names depending on keys (but the special names depend on values) */
/* Equality */
/* Object/Reference-only definitions (keys) */
/* Object/Reference-only definitions (values) */
/* Primitive-type-only definitions (values) */
/*
* Copyright (C) 2002-2013 Sebastiano Vigna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.unimi.dsi.fastutil.objects;
import it.unimi.dsi.fastutil.objects.ObjectSortedSet;
import it.unimi.dsi.fastutil.objects.ObjectSortedSets;
import java.util.Comparator;
import java.util.Map;
import java.util.SortedMap;
import java.util.NoSuchElementException;
/** A class providing static methods and objects that do useful things with type-specific sorted maps.
*
* @see java.util.Collections
*/
public class Object2CharSortedMaps {
private Object2CharSortedMaps() {}
/** Returns a comparator for entries based on a given comparator on keys.
*
* @param comparator a comparator on keys.
* @return the associated comparator on entries.
*/
public static <K> Comparator<? super Map.Entry<K, ?>> entryComparator( final Comparator <K> comparator ) {
return new Comparator<Map.Entry<K, ?>>() {
public int compare( Map.Entry<K, ?> x, Map.Entry<K, ?> y ) {
return comparator.compare( x.getKey(), y.getKey() );
}
};
}
/** An immutable class representing an empty type-specific sorted map.
*
* <P>This class may be useful to implement your own in case you subclass
* a type-specific sorted map.
*/
public static class EmptySortedMap <K> extends Object2CharMaps.EmptyMap <K> implements Object2CharSortedMap <K>, java.io.Serializable, Cloneable {
private static final long serialVersionUID = -7046029254386353129L;
protected EmptySortedMap() {}
public Comparator <? super K> comparator() { return null; }
@SuppressWarnings("unchecked")
public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { return ObjectSortedSets.EMPTY_SET; }
@SuppressWarnings("unchecked")
public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return ObjectSortedSets.EMPTY_SET; }
@SuppressWarnings("unchecked")
public ObjectSortedSet <K> keySet() { return ObjectSortedSets.EMPTY_SET; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> subMap( final K from, final K to ) { return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> headMap( final K to ) { return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> tailMap( final K from ) { return EMPTY_MAP; }
public K firstKey() { throw new NoSuchElementException(); }
public K lastKey() { throw new NoSuchElementException(); }
}
/** An empty type-specific sorted map (immutable). It is serializable and cloneable. */
@SuppressWarnings("rawtypes")
public static final EmptySortedMap EMPTY_MAP = new EmptySortedMap();
/** An immutable class representing a type-specific singleton sorted map.
*
* <P>This class may be useful to implement your own in case you subclass
* a type-specific sorted map.
*/
public static class Singleton <K> extends Object2CharMaps.Singleton <K> implements Object2CharSortedMap <K>, java.io.Serializable, Cloneable {
private static final long serialVersionUID = -7046029254386353129L;
protected final Comparator <? super K> comparator;
protected Singleton( final K key, final char value, Comparator <? super K> comparator ) {
super( key, value );
this.comparator = comparator;
}
protected Singleton( final K key, final char value ) {
this( key, value, null );
}
@SuppressWarnings("unchecked")
final int compare( final K k1, final K k2 ) {
return comparator == null ? ( ((Comparable<K>)(k1)).compareTo(k2) ) : comparator.compare( k1, k2 );
}
public Comparator <? super K> comparator() { return comparator; }
@SuppressWarnings("unchecked")
public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { if ( entries == null ) entries = ObjectSortedSets.singleton( (Object2CharMap.Entry <K>)new SingletonEntry(), (Comparator<? super Object2CharMap.Entry <K> >)entryComparator( comparator ) ); return (ObjectSortedSet<Object2CharMap.Entry <K> >)entries; }
@SuppressWarnings({ "rawtypes", "unchecked" })
public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return (ObjectSortedSet)object2CharEntrySet(); }
public ObjectSortedSet <K> keySet() { if ( keys == null ) keys = ObjectSortedSets.singleton( key, comparator ); return (ObjectSortedSet <K>)keys; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> subMap( final K from, final K to ) { if ( compare( from, key ) <= 0 && compare( key, to ) < 0 ) return this; return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> headMap( final K to ) { if ( compare( key, to ) < 0 ) return this; return EMPTY_MAP; }
@SuppressWarnings("unchecked")
public Object2CharSortedMap <K> tailMap( final K from ) { if ( compare( from, key ) <= 0 ) return this; return EMPTY_MAP; }
public K firstKey() { return key; }
public K lastKey() { return key; }
}
/** Returns a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
*
* <P>Note that albeit the returned map is immutable, its default return value may be changed.
*
* @param key the only key of the returned sorted map.
* @param value the only value of the returned sorted map.
* @return a type-specific immutable sorted map containing just the pair <code><key,value></code>.
*/
public static <K> Object2CharSortedMap <K> singleton( final K key, Character value ) {
return new Singleton <K>( (key), ((value).charValue()) );
}
/** RETURNS a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
*
* <P>Note that albeit the returned map is immutable, its default return value may be changed.
*
* @param key the only key of the returned sorted map.
* @param value the only value of the returned sorted map.
* @param comparator the comparator to use in the returned sorted map.
* @return a type-specific immutable sorted map containing just the pair <code><key,value></code>.
*/
public static <K> Object2CharSortedMap <K> singleton( final K key, Character value, Comparator <? super K> comparator ) {
return new Singleton <K>( (key), ((value).charValue()), comparator );
}
/** Returns a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
*
* <P>Note that albeit the returned map is immutable, its default return value may be changed.
*
* @param key the only key of the returned sorted map.
* @param value the only value of the returned sorted map.
* @return a type-specific immutable sorted map containing just the pair <code><key,value></code>.
*/
public static <K> Object2CharSortedMap <K> singleton( final K key, final char value ) {
return new Singleton <K>( key, value );
}
/** Returns a type-specific immutable sorted map containing only the specified pair. The returned sorted map is serializable and cloneable.
*
* <P>Note that albeit the returned map is immutable, its default return value may be changed.
*
* @param key the only key of the returned sorted map.
* @param value the only value of the returned sorted map.
* @param comparator the comparator to use in the returned sorted map.
* @return a type-specific immutable sorted map containing just the pair <code><key,value></code>.
*/
public static <K> Object2CharSortedMap <K> singleton( final K key, final char value, Comparator <? super K> comparator ) {
return new Singleton <K>( key, value, comparator );
}
/** A synchronized wrapper class for sorted maps. */
public static class SynchronizedSortedMap <K> extends Object2CharMaps.SynchronizedMap <K> implements Object2CharSortedMap <K>, java.io.Serializable {
private static final long serialVersionUID = -7046029254386353129L;
protected final Object2CharSortedMap <K> sortedMap;
protected SynchronizedSortedMap( final Object2CharSortedMap <K> m, final Object sync ) {
super( m, sync );
sortedMap = m;
}
protected SynchronizedSortedMap( final Object2CharSortedMap <K> m ) {
super( m );
sortedMap = m;
}
public Comparator <? super K> comparator() { synchronized( sync ) { return sortedMap.comparator(); } }
public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { if ( entries == null ) entries = ObjectSortedSets.synchronize( sortedMap.object2CharEntrySet(), sync ); return (ObjectSortedSet<Object2CharMap.Entry <K> >)entries; }
@SuppressWarnings({ "rawtypes", "unchecked" })
public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return (ObjectSortedSet)object2CharEntrySet(); }
public ObjectSortedSet <K> keySet() { if ( keys == null ) keys = ObjectSortedSets.synchronize( sortedMap.keySet(), sync ); return (ObjectSortedSet <K>)keys; }
public Object2CharSortedMap <K> subMap( final K from, final K to ) { return new SynchronizedSortedMap <K>( sortedMap.subMap( from, to ), sync ); }
public Object2CharSortedMap <K> headMap( final K to ) { return new SynchronizedSortedMap <K>( sortedMap.headMap( to ), sync ); }
public Object2CharSortedMap <K> tailMap( final K from ) { return new SynchronizedSortedMap <K>( sortedMap.tailMap( from ), sync ); }
public K firstKey() { synchronized( sync ) { return sortedMap.firstKey(); } }
public K lastKey() { synchronized( sync ) { return sortedMap.lastKey(); } }
}
/** Returns a synchronized type-specific sorted map backed by the given type-specific sorted map.
*
* @param m the sorted map to be wrapped in a synchronized sorted map.
* @return a synchronized view of the specified sorted map.
* @see java.util.Collections#synchronizedSortedMap(SortedMap)
*/
public static <K> Object2CharSortedMap <K> synchronize( final Object2CharSortedMap <K> m ) { return new SynchronizedSortedMap <K>( m ); }
/** Returns a synchronized type-specific sorted map backed by the given type-specific sorted map, using an assigned object to synchronize.
*
* @param m the sorted map to be wrapped in a synchronized sorted map.
* @param sync an object that will be used to synchronize the access to the sorted sorted map.
* @return a synchronized view of the specified sorted map.
* @see java.util.Collections#synchronizedSortedMap(SortedMap)
*/
public static <K> Object2CharSortedMap <K> synchronize( final Object2CharSortedMap <K> m, final Object sync ) { return new SynchronizedSortedMap <K>( m, sync ); }
 /** An unmodifiable wrapper class for sorted maps.
	 *
	 * All query operations delegate to the wrapped {@link Object2CharSortedMap};
	 * mutating operations are inherited from {@code Object2CharMaps.UnmodifiableMap}
	 * and throw {@link UnsupportedOperationException}. Views (entry set, key set,
	 * sub/head/tail maps) are themselves wrapped in unmodifiable wrappers.
	 */
	public static class UnmodifiableSortedMap <K> extends Object2CharMaps.UnmodifiableMap <K> implements Object2CharSortedMap <K>, java.io.Serializable {
	 private static final long serialVersionUID = -7046029254386353129L;
	 // Kept in addition to the superclass field so sorted-map operations need no cast.
	 protected final Object2CharSortedMap <K> sortedMap;
	 protected UnmodifiableSortedMap( final Object2CharSortedMap <K> m ) {
	  super( m );
	  sortedMap = m;
	 }
	 public Comparator <? super K> comparator() { return sortedMap.comparator(); }
	 // View methods: lazily create and cache unmodifiable wrappers around the backing views.
	 public ObjectSortedSet<Object2CharMap.Entry <K> > object2CharEntrySet() { if ( entries == null ) entries = ObjectSortedSets.unmodifiable( sortedMap.object2CharEntrySet() ); return (ObjectSortedSet<Object2CharMap.Entry <K> >)entries; }
	 @SuppressWarnings({ "rawtypes", "unchecked" })
	 public ObjectSortedSet<Map.Entry<K, Character>> entrySet() { return (ObjectSortedSet)object2CharEntrySet(); }
	 public ObjectSortedSet <K> keySet() { if ( keys == null ) keys = ObjectSortedSets.unmodifiable( sortedMap.keySet() ); return (ObjectSortedSet <K>)keys; }
	 // Range views wrap the backing map's range views, so they are unmodifiable too.
	 public Object2CharSortedMap <K> subMap( final K from, final K to ) { return new UnmodifiableSortedMap <K>( sortedMap.subMap( from, to ) ); }
	 public Object2CharSortedMap <K> headMap( final K to ) { return new UnmodifiableSortedMap <K>( sortedMap.headMap( to ) ); }
	 public Object2CharSortedMap <K> tailMap( final K from ) { return new UnmodifiableSortedMap <K>( sortedMap.tailMap( from ) ); }
	 public K firstKey() { return sortedMap.firstKey(); }
	 public K lastKey() { return sortedMap.lastKey(); }
	}
 /** Returns an unmodifiable type-specific sorted map backed by the given type-specific sorted map.
	 *
	 * @param <K> the type of the keys of the map.
	 * @param m the sorted map to be wrapped in an unmodifiable sorted map.
	 * @return an unmodifiable view of the specified sorted map.
	 * @see java.util.Collections#unmodifiableSortedMap(SortedMap)
	 */
	public static <K> Object2CharSortedMap <K> unmodifiable( final Object2CharSortedMap <K> m ) { return new UnmodifiableSortedMap <K>( m ); }
}
| karussell/fastutil | src/it/unimi/dsi/fastutil/objects/Object2CharSortedMaps.java | Java | apache-2.0 | 13,714 |
/*
*
* Copyright 2016 Robert Winkler
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package io.github.robwin.circuitbreaker;
import io.github.robwin.circuitbreaker.internal.RingBitSet;
import org.openjdk.jmh.annotations.*;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@BenchmarkMode(Mode.All)
// NOTE(review): the class name is misspelled ("Benachmark" instead of "Benchmark").
// Renaming it would also require renaming the source file, so it is left as-is here.
public class RingBitSetBenachmark {
    // Shared benchmark state: one RingBitSet contended by all benchmark threads.
    private RingBitSet ringBitSet;
    // JMH run configuration, referenced by the annotations on setBits() below.
    private static final int ITERATION_COUNT = 2;
    private static final int WARMUP_COUNT = 2;
    private static final int THREAD_COUNT = 10;
    public static final int FORK_COUNT = 1;
    @Setup
    public void setUp() {
        // Ring of 1000 bits; setNextBit wraps around once the ring is full.
        ringBitSet = new RingBitSet(1000);
    }
    @Benchmark
    @Fork(value = FORK_COUNT)
    @Threads(value = THREAD_COUNT)
    @Warmup(iterations = WARMUP_COUNT)
    @Measurement(iterations = ITERATION_COUNT)
    public void setBits(){
        // Measure the cost of writing one 'true' and one 'false' bit under contention.
        ringBitSet.setNextBit(true);
        ringBitSet.setNextBit(false);
    }
}
| storozhukBM/javaslang-circuitbreaker | src/jmh/java/io/github/robwin/circuitbreaker/RingBitSetBenachmark.java | Java | apache-2.0 | 1,513 |
/* @flow strict-local */
import { addBreadcrumb } from '@sentry/react-native';
import type { Narrow, Stream, User } from '../types';
import { topicNarrow, streamNarrow, groupNarrow, specialNarrow } from './narrow';
import { isUrlOnRealm } from './url';
const getPathsFromUrl = (url: string = '', realm: string) => {
const paths = url
.split(realm)
.pop()
.split('#narrow/')
.pop()
.split('/');
if (paths.length > 0 && paths[paths.length - 1] === '') {
// url ends with /
paths.splice(-1, 1);
}
return paths;
};
/** PRIVATE -- exported only for tests. */
export const isInternalLink = (url: string, realm: string): boolean =>
isUrlOnRealm(url, realm) ? /^(\/#narrow|#narrow)/i.test(url.split(realm).pop()) : false;
/** PRIVATE -- exported only for tests. */
export const isMessageLink = (url: string, realm: string): boolean =>
isInternalLink(url, realm) && url.includes('near');
type LinkType = 'external' | 'home' | 'pm' | 'topic' | 'stream' | 'special';
export const getLinkType = (url: string, realm: string): LinkType => {
if (!isInternalLink(url, realm)) {
return 'external';
}
const paths = getPathsFromUrl(url, realm);
if (
(paths.length === 2 && paths[0] === 'pm-with')
|| (paths.length === 4 && paths[0] === 'pm-with' && paths[2] === 'near')
) {
return 'pm';
}
if (
(paths.length === 4 || paths.length === 6)
&& paths[0] === 'stream'
&& (paths[2] === 'subject' || paths[2] === 'topic')
) {
return 'topic';
}
if (paths.length === 2 && paths[0] === 'stream') {
return 'stream';
}
if (paths.length === 2 && paths[0] === 'is' && /^(private|starred|mentioned)/i.test(paths[1])) {
return 'special';
}
return 'home';
};
/** Decode a dot-encoded string. */
// The Zulip webapp uses this encoding in narrow-links:
// https://github.com/zulip/zulip/blob/1577662a6/static/js/hash_util.js#L18-L25
export const decodeHashComponent = (string: string): string => {
try {
return decodeURIComponent(string.replace(/\./g, '%'));
} catch (err) {
// `decodeURIComponent` throws strikingly uninformative errors
addBreadcrumb({
level: 'info',
type: 'decoding',
message: 'decodeHashComponent error',
data: { input: string },
});
throw err;
}
};
/** Parse the operand of a `stream` operator, returning a stream name. */
const parseStreamOperand = (operand, streamsById): string => {
// "New" (2018) format: ${stream_id}-${stream_name} .
const match = /^(\d+)-/.exec(operand);
if (match) {
const stream = streamsById.get(parseInt(match[0], 10));
if (stream) {
return stream.name;
}
}
// Old format: just stream name. This case is relevant indefinitely,
// so that links in old conversations continue to work.
return decodeHashComponent(operand);
};
/** Parse the operand of a `topic` or `subject` operator (dot-encoded; see decodeHashComponent). */
const parseTopicOperand = operand => decodeHashComponent(operand);
/**
 * Parse the operand of a `pm-with` operator.
 *
 * Returns the recipients' emails, or null if any recipient id is unknown.
 */
const parsePmOperand = (operand, usersById) => {
  const idStrings = operand.split('-')[0].split(',');
  const emails = [];
  for (const idString of idStrings) {
    const user = usersById.get(parseInt(idString, 10));
    if (user === undefined) {
      return null;
    }
    emails.push(user.email);
  }
  return emails;
};
export const getNarrowFromLink = (
url: string,
realm: string,
usersById: Map<number, User>,
streamsById: Map<number, Stream>,
): Narrow | null => {
const type = getLinkType(url, realm);
const paths = getPathsFromUrl(url, realm);
switch (type) {
case 'pm': {
const recipientEmails = parsePmOperand(paths[1], usersById);
if (recipientEmails === null) {
return null;
}
return groupNarrow(recipientEmails);
}
case 'topic':
return topicNarrow(parseStreamOperand(paths[1], streamsById), parseTopicOperand(paths[3]));
case 'stream':
return streamNarrow(parseStreamOperand(paths[1], streamsById));
case 'special':
return specialNarrow(paths[1]);
default:
return null;
}
};
export const getMessageIdFromLink = (url: string, realm: string): number => {
const paths = getPathsFromUrl(url, realm);
return isMessageLink(url, realm) ? parseInt(paths[paths.lastIndexOf('near') + 1], 10) : 0;
};
| vishwesh3/zulip-mobile | src/utils/internalLinks.js | JavaScript | apache-2.0 | 4,379 |
/*
* Copyright 2015 Doltech Systems Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package nz.co.doltech.gwtjui.demo.client.application.home;
import com.google.inject.Inject;
import com.google.web.bindery.event.shared.EventBus;
import com.gwtplatform.mvp.client.HasUiHandlers;
import com.gwtplatform.mvp.client.Presenter;
import com.gwtplatform.mvp.client.View;
import com.gwtplatform.mvp.client.annotations.NameToken;
import com.gwtplatform.mvp.client.annotations.ProxyCodeSplit;
import com.gwtplatform.mvp.client.proxy.ProxyPlace;
import nz.co.doltech.gwtjui.demo.client.application.ApplicationPresenter;
import nz.co.doltech.gwtjui.demo.client.place.NameTokens;
/**
 * GWTP presenter for the home page; revealed inside
 * {@link ApplicationPresenter}'s main-content slot.
 */
public class HomePresenter extends Presenter<HomePresenter.MyView, HomePresenter.MyProxy> implements HomeUiHandlers {
    /** The view contract; receives this presenter as its UI handler. */
    interface MyView extends View, HasUiHandlers<HomeUiHandlers> {
    }
    /** Place proxy; code-split and bound to the "home" name token. */
    @NameToken(NameTokens.home)
    @ProxyCodeSplit
    interface MyProxy extends ProxyPlace<HomePresenter> {
    }
    @Inject
    HomePresenter(EventBus eventBus, MyView view, MyProxy proxy) {
        super(eventBus, view, proxy, ApplicationPresenter.TYPE_SetMainContent);
        // Wire the view back to this presenter so UI events reach us.
        getView().setUiHandlers(this);
    }
}
| BenDol/gwt-jui-demo | src/main/java/nz/co/doltech/gwtjui/demo/client/application/home/HomePresenter.java | Java | apache-2.0 | 1,700 |
<?php
/*
 *---------------------------------------------------------------
 * APPLICATION ENVIRONMENT
 *---------------------------------------------------------------
 *
 * You can load different configurations depending on your
 * current environment. Setting the environment also influences
 * things like logging and error reporting.
 *
 * This can be set to anything, but default usage is:
 *
 *     development
 *     testing
 *     production
 *
 * NOTE: If you change these, also change the error_reporting() code below
 *
 */
// NOTE(review): switch this to 'production' before deploying;
// 'development' enables full error output to the browser.
	define('ENVIRONMENT', 'development');
/*
 *---------------------------------------------------------------
 * ERROR REPORTING
 *---------------------------------------------------------------
 *
 * Different environments will require different levels of error reporting.
 * By default development will show errors but testing and live will hide them.
 */
if (defined('ENVIRONMENT'))
{
	switch (ENVIRONMENT)
	{
		case 'development':
			error_reporting(E_ALL);
		break;
		case 'testing':
		case 'production':
			error_reporting(0);
		break;
		default:
			exit('The application environment is not set correctly.');
	}
}
/*
 *---------------------------------------------------------------
 * SYSTEM FOLDER NAME
 *---------------------------------------------------------------
 *
 * This variable must contain the name of your "system" folder.
 * Include the path if the folder is not in the same directory
 * as this file.
 *
 */
// The CodeIgniter core is installed via Composer (rogeriopradoj/codeigniter),
// so the system folder lives under vendor/.
$system_path = 'vendor/rogeriopradoj/codeigniter/system';
/*
 *---------------------------------------------------------------
 * APPLICATION FOLDER NAME
 *---------------------------------------------------------------
 *
 * If you want this front controller to use a different "application"
 * folder then the default one you can set its name here. The folder
 * can also be renamed or relocated anywhere on your server. If
 * you do, use a full server path. For more info please see the user guide:
 * http://codeigniter.com/user_guide/general/managing_apps.html
 *
 * NO TRAILING SLASH!
 *
 */
$application_folder = 'application';
/*
 * --------------------------------------------------------------------
 * DEFAULT CONTROLLER
 * --------------------------------------------------------------------
 *
 * Normally you will set your default controller in the routes.php file.
 * You can, however, force a custom routing by hard-coding a
 * specific controller class/function here. For most applications, you
 * WILL NOT set your routing here, but it's an option for those
 * special instances where you might want to override the standard
 * routing in a specific front controller that shares a common CI installation.
 *
 * IMPORTANT: If you set the routing here, NO OTHER controller will be
 * callable. In essence, this preference limits your application to ONE
 * specific controller. Leave the function name blank if you need
 * to call functions dynamically via the URI.
 *
 * Un-comment the $routing array below to use this feature
 *
 */
	// The directory name, relative to the "controllers" folder.  Leave blank
	// if your controller is not in a sub-folder within the "controllers" folder
	// $routing['directory'] = '';
	// The controller class file name.  Example:  Mycontroller
	// $routing['controller'] = '';
	// The controller function you wish to be called.
	// $routing['function']	= '';
/*
 * -------------------------------------------------------------------
 *  CUSTOM CONFIG VALUES
 * -------------------------------------------------------------------
 *
 * The $assign_to_config array below will be passed dynamically to the
 * config class when initialized. This allows you to set custom config
 * items or override any default config values found in the config.php file.
 * This can be handy as it permits you to share one application between
 * multiple front controller files, with each file containing different
 * config values.
 *
 * Un-comment the $assign_to_config array below to use this feature
 *
 */
	// $assign_to_config['name_of_config_item'] = 'value of config item';
// --------------------------------------------------------------------
// END OF USER CONFIGURABLE SETTINGS.  DO NOT EDIT BELOW THIS LINE
// --------------------------------------------------------------------
/*
 * ---------------------------------------------------------------
 *  Resolve the system path for increased reliability
 * ---------------------------------------------------------------
 */
// Set the current directory correctly for CLI requests
if (defined('STDIN'))
{
	chdir(dirname(__FILE__));
}
if (realpath($system_path) !== FALSE)
{
	$system_path = realpath($system_path).'/';
}
// ensure there's a trailing slash
$system_path = rtrim($system_path, '/').'/';
// Is the system path correct?
if ( ! is_dir($system_path))
{
	exit("Your system folder path does not appear to be set correctly. Please open the following file and correct this: ".pathinfo(__FILE__, PATHINFO_BASENAME));
}
/*
 * -------------------------------------------------------------------
 *  Now that we know the path, set the main path constants
 * -------------------------------------------------------------------
 */
// The name of THIS file
define('SELF', pathinfo(__FILE__, PATHINFO_BASENAME));
// The PHP file extension
// this global constant is deprecated.
define('EXT', '.php');
// Path to the system folder
define('BASEPATH', str_replace("\\", "/", $system_path));
// Path to the front controller (this file)
define('FCPATH', str_replace(SELF, '', __FILE__));
// Name of the "system folder"
define('SYSDIR', trim(strrchr(trim(BASEPATH, '/'), '/'), '/'));
// The path to the "application" folder
if (is_dir($application_folder))
{
	define('APPPATH', $application_folder.'/');
}
else
{
	if ( ! is_dir(BASEPATH.$application_folder.'/'))
	{
		exit("Your application folder path does not appear to be set correctly. Please open the following file and correct this: ".SELF);
	}
	define('APPPATH', BASEPATH.$application_folder.'/');
}
/*
 * --------------------------------------------------------------------
 * LOAD THE BOOTSTRAP FILE
 * --------------------------------------------------------------------
 *
 * And away we go...
 *
 */
require_once BASEPATH.'core/CodeIgniter.php';
/* End of file index.php */
/* Location: ./index.php */
| oclc-developer-house/thirdpartyapi | index.php | PHP | apache-2.0 | 6,390 |
package br.com.cofagra.bi.renders;
import java.io.IOException;
import javax.faces.component.UIComponent;
import javax.faces.context.FacesContext;
import javax.faces.context.ResponseWriter;
import lombok.extern.java.Log;
import org.primefaces.component.inputmask.InputMask;
import org.primefaces.component.inputmask.InputMaskRenderer;
import org.primefaces.util.ComponentUtils;
import org.primefaces.util.HTML;
/**
* Render respons�vel por sobrescrever o componente
* <p:inputMask/>
*
* @author thiagosampaio
*/
@Log
public class ExtPrimeInputMaskRenderer extends InputMaskRenderer{
	/**
	 * Reads the submitted value for this input from the request.
	 * Disabled/read-only inputs are skipped so client-tampered values
	 * cannot be applied to them.
	 */
	@Override
	public void decode(FacesContext context, UIComponent component) {
		InputMask inputMask = (InputMask) component;
		if (inputMask.isDisabled() || inputMask.isReadonly()) {
			return;
		}
		decodeBehaviors(context, inputMask);
		String clientId = inputMask.getClientId(context);
		String submittedValue = (String) context.getExternalContext().getRequestParameterMap().get(clientId);
		if (submittedValue != null) {
			inputMask.setSubmittedValue(submittedValue);
		}
	}
	/** Renders the input element followed by its widget initialization script. */
	@Override
	public void encodeEnd(FacesContext context, UIComponent component) throws IOException {
		InputMask inputMask = (InputMask) component;
		encodeMarkup(context, inputMask);
		encodeScript(context, inputMask);
	}
	/**
	 * Emits the PrimeFaces widget-creation script for the mask.
	 * The writes below build one JS expression, so their order is significant.
	 */
	protected void encodeScript(FacesContext context, InputMask inputMask) throws IOException {
		ResponseWriter writer = context.getResponseWriter();
		String clientId = inputMask.getClientId(context);
		String mask = inputMask.getMask();
		startScript(writer, clientId);
		writer.write("PrimeFaces.cw('InputMask','" + inputMask.resolveWidgetVar() + "',{");
		writer.write("id:'" + clientId + "'");
		if (mask != null) {
			writer.write(",mask:'" + inputMask.getMask() + "'");
			// The placeholder option is only meaningful when a mask is set.
			if (inputMask.getPlaceHolder() != null)
				writer.write(",placeholder:'" + inputMask.getPlaceHolder() + "'");
		}
		encodeClientBehaviors(context, inputMask);
		writer.write("});");
		endScript(writer);
	}
	/**
	 * Writes the &lt;input type="text"&gt; element for the mask component,
	 * mirroring the stock PrimeFaces renderer except for the disabled style class
	 * (see the comment below).
	 */
	protected void encodeMarkup(FacesContext context, InputMask inputMask) throws IOException {
		ResponseWriter writer = context.getResponseWriter();
		String clientId = inputMask.getClientId(context);
		String styleClass = inputMask.getStyleClass();
		String defaultClass = InputMask.STYLE_CLASS;
		defaultClass = !inputMask.isValid() ? defaultClass + " ui-state-error" : defaultClass;
		// Commented out: when disabled was true, the 'ui-state-disabled' class
		// was breaking the Twitter Bootstrap styling.
		// defaultClass = inputMask.isDisabled() ? defaultClass + " ui-state-disabled" : defaultClass;
		styleClass = styleClass == null ? defaultClass : defaultClass + " " + styleClass;
		writer.startElement("input", null);
		writer.writeAttribute("id", clientId, null);
		writer.writeAttribute("name", clientId, null);
		writer.writeAttribute("type", "text", null);
		String valueToRender = ComponentUtils.getValueToRender(context, inputMask);
		if (valueToRender != null) {
			writer.writeAttribute("value", valueToRender, null);
		}
		renderPassThruAttributes(context, inputMask, HTML.INPUT_TEXT_ATTRS);
		if (inputMask.isDisabled())
			writer.writeAttribute("disabled", "disabled", "disabled");
		if (inputMask.isReadonly())
			writer.writeAttribute("readonly", "readonly", "readonly");
		if (inputMask.getStyle() != null)
			writer.writeAttribute("style", inputMask.getStyle(), "style");
		writer.writeAttribute("class", styleClass, "styleClass");
		writer.endElement("input");
	}
} | thiagonego/cofagra_bi | src/main/java/br/com/cofagra/bi/renders/ExtPrimeInputMaskRenderer.java | Java | apache-2.0 | 4,004 |
package it.polimi.dima.giftlist.presentation.presenter;
import com.pushtorefresh.storio.sqlite.StorIOSQLite;
import com.pushtorefresh.storio.sqlite.operations.put.PutResults;
import com.pushtorefresh.storio.sqlite.queries.DeleteQuery;
import com.pushtorefresh.storio.sqlite.queries.Query;
import com.pushtorefresh.storio.sqlite.queries.RawQuery;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import java.io.File;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import javax.inject.Inject;
import hugo.weaving.DebugLog;
import it.polimi.dima.giftlist.data.db.table.EbayProductTable;
import it.polimi.dima.giftlist.data.db.table.EtsyProductTable;
import it.polimi.dima.giftlist.data.db.table.WishlistTable;
import it.polimi.dima.giftlist.data.model.EbayProduct;
import it.polimi.dima.giftlist.data.model.EtsyProduct;
import it.polimi.dima.giftlist.data.model.Product;
import it.polimi.dima.giftlist.data.model.Wishlist;
import it.polimi.dima.giftlist.domain.interactor.GetDbProductListUseCase;
import it.polimi.dima.giftlist.presentation.event.ProductRemovedEvent;
import it.polimi.dima.giftlist.presentation.event.WishlistAddedEvent;
import it.polimi.dima.giftlist.presentation.view.WishlistView;
import it.polimi.dima.giftlist.presentation.view.activity.BaseActivity;
import rx.Observer;
import rx.SingleSubscriber;
import rx.android.schedulers.AndroidSchedulers;
import timber.log.Timber;
/**
 * Presenter for a single wishlist screen: streams the wishlist's products
 * from the local database, pushes them (sorted) to the view, and persists
 * reordering and removal of products.
 *
 * Created by Alessandro on 18/03/16.
 */
public class WishlistPresenter extends BaseRxLcePresenter<WishlistView, List<Product>, GetDbProductListUseCase> {

    @Inject
    public WishlistPresenter(GetDbProductListUseCase getDbProductListUseCase, StorIOSQLite db) {
        super(getDbProductListUseCase, db);
    }

    /**
     * (Re)subscribes to the DB-backed product list use case.
     *
     * @param pullToRefresh whether the reload was triggered by a pull-to-refresh gesture
     */
    @Override
    public void subscribe(boolean pullToRefresh) {
        if (!useCase.isUnsubscribed()) {
            unsubscribe();
        }
        useCase.execute(new BaseSubscriber(pullToRefresh));
        if (isViewAttached()) {
            getView().showLoading(pullToRefresh);
        }
    }

    @Override
    protected void onCompleted() {
        // DB subscriptions do not complete
    }

    @Override
    protected void onError(Throwable e, boolean pullToRefresh) {
        if (isViewAttached()) {
            getView().showError(e, pullToRefresh);
        }
        unsubscribe();
    }

    /** Pushes a freshly sorted copy of the emitted product list to the view. */
    @Override
    @DebugLog
    protected void onNext(List<Product> data) {
        // Copy before sorting so the emitted list itself is never mutated.
        List<Product> orderedList = new LinkedList<>(data);
        Collections.sort(orderedList);
        getView().setData(orderedList);
        if (isViewAttached()) {
            getView().showContent();
        }
    }

    /** Loads the wishlist's name and occasion and shows them in the collapsing toolbar. */
    public void setActionBarDetails(long wishlistId) {
        db.get()
                .object(Wishlist.class)
                .withQuery(Query.builder()
                        .table(WishlistTable.TABLE)
                        .where("id = ?")
                        .whereArgs(wishlistId)
                        .build())
                .prepare()
                .asRxSingle()
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(new SingleSubscriber<Wishlist>() {
                    @Override
                    public void onSuccess(Wishlist value) {
                        getView().initCollapsingToolbar(value.getName(), value.getOccasion());
                    }
                    @Override
                    public void onError(Throwable error) {
                        Timber.d("Can't load wishlist details");
                    }
                });
    }

    /**
     * Persists the display order of every product in the list.
     * I do not want the observer to emit an unnecessary onNext,
     * so I manually run update queries without declaring the affected tables.
     */
    public void updateProductListOrder(List<Product> productList) {
        for (Product p : productList) {
            String query = displayOrderUpdateQueryFor(p);
            if (query == null) {
                continue; // unknown Product subtype: nothing to persist
            }
            executeRawQuery(query,
                    String.format("Product %d is set at order %d in DB", p.getId(), p.getDisplayOrder()),
                    String.format("Error in setting product %d at order %d in DB", p.getId(), p.getDisplayOrder()));
        }
    }

    /** Deletes the product's cached image from disk, then removes its row from the DB. */
    public void removeProduct(Product product) {
        deleteImages(product.getImageUri());
        long productId = product.getId();
        String query = customDeleteQueryFor(product, productId);
        if (query == null) {
            return; // unknown Product subtype: nothing to delete
        }
        executeRawQuery(query,
                String.format("Success in deleting the product %d", productId),
                String.format("Error in deleting the product %d", productId));
    }

    /** Builds the table-specific display-order UPDATE for the product, or null for unknown subtypes. */
    private static String displayOrderUpdateQueryFor(Product p) {
        if (p instanceof EbayProduct) {
            return EbayProductTable.getDisplayOrderUpdateQuery(p.getId(), p.getDisplayOrder());
        }
        if (p instanceof EtsyProduct) {
            return EtsyProductTable.getDisplayOrderUpdateQuery(p.getId(), p.getDisplayOrder());
        }
        return null;
    }

    /** Builds the table-specific DELETE for the product, or null for unknown subtypes. */
    private static String customDeleteQueryFor(Product product, long productId) {
        if (product instanceof EtsyProduct) {
            return EtsyProductTable.getCustomDeleteQuery(productId);
        }
        if (product instanceof EbayProduct) {
            return EbayProductTable.getCustomDeleteQuery(productId);
        }
        return null;
    }

    /**
     * Executes a raw SQL statement asynchronously and logs the outcome.
     * All Observables in StorIO are already subscribed on Schedulers.io(),
     * you just need to set observeOn().
     */
    private void executeRawQuery(String query, final String successMessage, final String errorMessage) {
        db.executeSQL()
                .withQuery(RawQuery.builder()
                        .query(query)
                        .build())
                .prepare()
                .asRxSingle()
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(new SingleSubscriber<Object>() {
                    @Override
                    public void onSuccess(Object value) {
                        Timber.d(successMessage);
                    }
                    @Override
                    public void onError(Throwable error) {
                        Timber.d(errorMessage);
                    }
                });
    }

    /** Removes the locally cached image file for a product, logging the result. */
    private void deleteImages(String uri) {
        File fdelete = new File(uri);
        if (fdelete.exists()) {
            if (fdelete.delete()) {
                Timber.d("file Deleted: %s", uri);
            } else {
                Timber.d("file not Deleted %s", uri);
            }
        }
    }
}
| volcacius/Giftlist | app/src/main/java/it/polimi/dima/giftlist/presentation/presenter/WishlistPresenter.java | Java | apache-2.0 | 8,395 |
<?php
/**
 * Dialog controller for registering a new file package by its S3 URL.
 * On valid POST it creates a PhabricatorFilePackage owned by the current
 * user and redirects to the package's view page; otherwise it (re)renders
 * the registration dialog with any validation errors.
 */
final class PhabricatorPackagerCreateController
  extends PhabricatorPackagerController {

  public function processRequest() {
    $request = $this->getRequest();
    $user = $request->getUser();

    $e_url = null;
    $errors = array();
    if ($request->isFormPost()) {
      $url = $request->getStr('url');
      if (empty($url)) {
        // Mark the field and collect the error for the error view below.
        $e_url = pht("Required");
        $errors[] = pht("Package URL must not be empty!");
      } else {
        // Valid submission: persist the package and jump to its detail page.
        $packageObject = new PhabricatorFilePackage();
        $packageObject->setAuthorPHID($user->getPHID());
        $packageObject->setPackageUrl($url);
        $packageObject->setDownloads(0);
        $packageObject->save();

        return id(new AphrontRedirectResponse())
          ->setURI($this->getApplicationURI('view/' . $packageObject->getID()));
      }
    }

    $error_view = null;
    if ($errors) {
      $error_view = new AphrontErrorView();
      $error_view->setTitle(pht('Form Errors'));
      $error_view->setErrors($errors);
    }

    $instructions =
      phutil_tag(
        'p',
        array(
          'class' => 'aphront-form-instructions',
        ),
        pht('Just paste a clean URL to the file in Amazon S3 here, '.
            'and everything will be fine.'));

    $form = id(new AphrontFormLayoutView())
      ->appendChild($instructions)
      ->appendChild(
        id(new AphrontFormTextControl())
          ->setLabel(pht("Package Url"))
          ->setName("url")
          ->setError($e_url)
          ->setValue("")
          ->setCaption("The clean url to the file on S3"));

    $dialog = new AphrontDialogView();
    $dialog->setUser($user);
    $dialog->setTitle(pht("Register Package"));
    $dialog->appendChild($form);
    $dialog->addCancelButton($this->getApplicationURI());
    $dialog->addSubmitButton(pht("Register this package"));

    $resp = new AphrontDialogResponse();
    return $resp->setDialog($dialog);
  }
}
| apexstudios/phabricator | src/applications/packager/controller/PhabricatorPackagerCreateController.php | PHP | apache-2.0 | 1,924 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2021 the original author or authors.
*/
package org.assertj.core.api.list;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.function.Consumer;
import org.assertj.core.data.TolkienCharacter;
import org.assertj.core.data.TolkienCharacterAssert;
import org.assertj.core.data.TolkienCharacterAssertFactory;
import org.junit.jupiter.api.Test;
/**
 * Verifies that {@code filteredOnAssertions(Consumer)} keeps strongly-typed
 * navigation assertions ({@code first()}, {@code last()}, {@code element(i)},
 * {@code elements(i)}) for both AssertFactory-based and class-based typed
 * assertions.
 */
class ListAssert_filteredOn_consumer_with_navigation_Test extends ListAssert_filteredOn_BaseTest {

  // Filter keeping only hobbits whose name starts with "Fro" (i.e. Frodo).
  private static Consumer<? super TolkienCharacter> nameStartingWithFro = hobbit -> assertThat(hobbit.getName()).startsWith("Fro");

  @Test
  void should_honor_AssertFactory_strongly_typed_navigation_assertions() {
    // GIVEN
    Iterable<TolkienCharacter> hobbits = hobbits();
    TolkienCharacterAssertFactory tolkienCharacterAssertFactory = new TolkienCharacterAssertFactory();
    // THEN
    assertThat(hobbits, tolkienCharacterAssertFactory).filteredOnAssertions(nameStartingWithFro)
                                                      .first()
                                                      .hasAge(33);
    assertThat(hobbits, tolkienCharacterAssertFactory).filteredOnAssertions(nameStartingWithFro)
                                                      .last()
                                                      .hasAge(33);
    assertThat(hobbits, tolkienCharacterAssertFactory).filteredOnAssertions(nameStartingWithFro)
                                                      .element(0)
                                                      .hasAge(33);
    assertThat(hobbits, tolkienCharacterAssertFactory).filteredOnAssertions(nameStartingWithFro)
                                                      .elements(0)
                                                      .first()
                                                      .hasAge(33);
  }

  @Test
  void should_honor_ClassBased_strongly_typed_navigation_assertions() {
    // GIVEN
    Iterable<TolkienCharacter> hobbits = hobbits();
    // THEN
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(nameStartingWithFro)
                                                     .first()
                                                     .hasAge(33);
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(nameStartingWithFro)
                                                     .last()
                                                     .hasAge(33);
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(nameStartingWithFro)
                                                     .element(0)
                                                     .hasAge(33);
    assertThat(hobbits, TolkienCharacterAssert.class).filteredOnAssertions(nameStartingWithFro)
                                                     .elements(0)
                                                     .first()
                                                     .hasAge(33);
  }
}
| hazendaz/assertj-core | src/test/java/org/assertj/core/api/list/ListAssert_filteredOn_consumer_with_navigation_Test.java | Java | apache-2.0 | 3,541 |
import re
import calendar
from logs_analyzer.settings import *
from logs_analyzer.validators import *
from datetime import datetime
def get_service_settings(service_name):
    """
    Get default settings for the said service.

    (The old docstring claimed 'None' was returned for unknown services, but
    the function has always raised instead; the docstring now matches.)

    :raises Exception: if no settings exist for the given service
    :param service_name: service name (example: nginx, apache2...)
    :return: dict of settings for the service
    """
    try:
        # Single lookup instead of the previous `in` check followed by .get().
        return SERVICES_SWITCHER[service_name]
    except KeyError:
        raise Exception("Service \""+service_name+"\" doesn't exists!")
def get_date_filter(settings, minute=None, hour=None,
                    day=None, month=None, year=None):
    """
    Get the date pattern that can be used to filter data from logs based on the params.

    Any date element left as None defaults to the current date/time at *call*
    time. (Bug fix: the previous signature used ``datetime.now().minute`` etc.
    as default values, which Python evaluates once at import time, so the
    defaults went stale in any long-running process.)

    :raises Exception: if the date elements aren't valid or the combination is unsupported
    :param settings: dict with 'dateminutes_format', 'datehours_format' and 'datedays_format'
    :param minute: int, '*' or None
    :param hour: int, '*' or None
    :param day: int or None
    :param month: int or None
    :param year: int or None
    :return: string
    """
    now = datetime.now()
    minute = now.minute if minute is None else minute
    hour = now.hour if hour is None else hour
    day = now.day if day is None else day
    month = now.month if month is None else month
    year = now.year if year is None else year

    if not is_valid_year(year) or not is_valid_month(month) or not is_valid_day(day) \
            or not is_valid_hour(hour) or not is_valid_minute(minute):
        raise Exception("Date elements aren't valid")

    if minute != '*' and hour != '*':
        return datetime(year, month, day, hour, minute).strftime(settings['dateminutes_format'])
    if minute == '*' and hour != '*':
        return datetime(year, month, day, hour).strftime(settings['datehours_format'])
    if minute == '*' and hour == '*':
        return datetime(year, month, day).strftime(settings['datedays_format'])
    # minute given but hour wildcarded: not a supported combination.
    raise Exception("Date elements aren't valid")
def filter_data(log_filter, data=None, filepath=None, is_casesensitive=True, is_regex=False, is_reverse=False):
    """
    Filter received data/file content and return the matching lines.

    :raises Exception: if both data and filepath are None
    :param log_filter: string to look for, or regex pattern when is_regex is True
    :param data: string content to filter (used when filepath is None)
    :param filepath: path of a file to filter line by line
    :param is_casesensitive: boolean
    :param is_regex: boolean
    :param is_reverse: boolean to inverse selection
    :return: string made of the selected lines
    """
    if filepath:
        try:
            # Accumulate in a list and join once: repeated string += is O(n^2).
            matched = []
            with open(filepath, 'r') as file_object:
                for line in file_object:
                    if check_match(line, log_filter, is_regex, is_casesensitive, is_reverse):
                        matched.append(line)
            return ''.join(matched)
        except (IOError, EnvironmentError) as e:
            # NOTE(review): terminating the whole process from a library helper
            # is surprising; kept for backward compatibility, but consider
            # letting the exception propagate instead.
            print(e.strerror)
            exit(2)
    elif data:
        matched = [line + "\n" for line in data.splitlines()
                   if check_match(line, log_filter, is_regex, is_casesensitive, is_reverse)]
        return ''.join(matched)
    else:
        raise Exception("Data and filepath values are NULL!")
def check_match(line, filter_pattern, is_regex, is_casesensitive, is_reverse):
    """
    Check if line contains/matches the filter pattern.

    :param line: string
    :param filter_pattern: string, or regex pattern (anchored at line start) when is_regex
    :param is_regex: boolean
    :param is_casesensitive: boolean
    :param is_reverse: boolean, invert the selection (keep the NON-matching lines)
    :return: boolean
    """
    if is_regex:
        flags = 0 if is_casesensitive else re.IGNORECASE
        matched = re.match(filter_pattern, line, flags) is not None
    else:
        if is_casesensitive:
            matched = filter_pattern in line
        else:
            matched = filter_pattern.lower() in line.lower()
    # Bug fix: the previous `return check_result and not is_reverse` could never
    # return True when is_reverse was set, so "inverse selection" always
    # produced an empty result. XOR with is_reverse implements the inversion.
    return matched != is_reverse
def get_web_requests(data, pattern, date_pattern=None, date_keys=None):
    """
    Analyze data (from the logs) and return the list of web requests formatted
    as the model (pattern) defined.

    :raises Exception: when date_pattern is given without date_keys
    :param data: string
    :param pattern: string: regex with 7 groups (ip, date, method, route, code, referrer, useragent)
    :param date_pattern: regex|None
    :param date_keys: dict|None
    :return: list of dicts
    """
    if date_pattern and not date_keys:
        raise Exception("date_keys is not defined")
    parsed = []
    for match in re.findall(pattern, data):
        # Normalize the raw timestamp to ISO 8601 only when a date pattern is configured.
        timestamp = __get_iso_datetime(match[1], date_pattern, date_keys) if date_pattern else match[1]
        parsed.append({'IP': match[0], 'DATETIME': timestamp,
                       'METHOD': match[2], 'ROUTE': match[3], 'CODE': match[4],
                       'REFERRER': match[5], 'USERAGENT': match[6]})
    return parsed
def get_auth_requests(data, pattern, date_pattern=None, date_keys=None):
    """
    Analyze data (from the logs) and return the list of auth requests formatted
    as the model (pattern) defined.

    :param data: string
    :param pattern: string: regex with 3 groups (date, service, request info)
    :param date_pattern: regex|None
    :param date_keys: dict|None
    :return: list of dicts
    """
    parsed = []
    for match in re.findall(pattern, data):
        # Normalize the raw timestamp to ISO 8601 only when a date pattern is configured.
        timestamp = __get_iso_datetime(match[0], date_pattern, date_keys) if date_pattern else match[0]
        request = analyze_auth_request(match[2])
        request['DATETIME'] = timestamp
        request['SERVICE'] = match[1]
        parsed.append(request)
    return parsed
def analyze_auth_request(request_info):
    """
    Analyze request info and return its main data (IP, invalid user,
    invalid password's user, is_preauth, is_closed).

    :param request_info: string
    :return: dict
    """
    lowered = request_info.lower()
    ip_matches = re.findall(IPv4_REGEX, request_info)
    user_matches = re.findall(AUTH_USER_INVALID_USER, request_info)
    pass_matches = re.findall(AUTH_PASS_INVALID_USER, request_info)
    return {
        'IP': ip_matches[0] if ip_matches else None,
        'INVALID_USER': user_matches[0] if user_matches else None,
        'INVALID_PASS_USER': pass_matches[0] if pass_matches else None,
        'IS_PREAUTH': '[preauth]' in lowered,
        'IS_CLOSED': 'connection closed by ' in lowered,
    }
def __get_iso_datetime(str_date, pattern, keys):
    """
    Change a raw datetime string from the logs to ISO 8601 format.

    :param str_date: string
    :param pattern: regex (date_pattern from settings)
    :param keys: dict (date_keys from settings) mapping element names to group indices
    :return: string
    """
    # Map month abbreviations ('Jan', 'Feb', ...) to their month numbers.
    month_numbers = {abbr: num for num, abbr in enumerate(calendar.month_abbr)}
    parts = re.findall(pattern, str_date)[0]
    # When the log format carries no year, fall back to the inferred one.
    year = int(parts[keys['year']]) if 'year' in keys else __get_auth_year()
    parsed = datetime(year,
                      month_numbers[parts[keys['month']]],
                      int(parts[keys['day']].strip()),
                      int(parts[keys['hour']]),
                      int(parts[keys['minute']]),
                      int(parts[keys['second']]))
    return parsed.isoformat(' ')
def __get_auth_year():
    # TODO: Add support for analysis done in different terms
    """
    Return the year in which the analysed requests happened, so there will be
    no bug if the analysis is done on New Year's Eve; the library was designed
    to be used for hourly analysis.

    :return: int
    """
    # Capture the clock once: the previous version called datetime.now() four
    # times, so the month/day/hour/year checks could straddle midnight and
    # disagree with each other.
    now = datetime.now()
    if now.month == 1 and now.day == 1 and now.hour == 0:
        return now.year - 1
    return now.year
class LogsAnalyzer:
    """
    Accumulate filters and apply them to a service's logs, taken either from an
    in-memory string (``data``) or from a log file (``filepath``).
    """

    def __init__(self, service, data=None, filepath=None):
        """
        Constructor, define service (nginx, apache2...), set data or filepath if needed

        :param service: string: service name (nginx, apache2...)
        :param data: string: data to be filtered if not from a file
        :param filepath: string: file path from which the data will be loaded if data isn't defined
        and you are not using the default service logs filepath
        :return:
        """
        self.__filters = []
        self.__settings = get_service_settings(service)
        self.data = data
        if filepath:
            self.filepath = filepath
        else:
            # Fall back to the service's default access-log location.
            self.filepath = self.__settings['dir_path'] + self.__settings['accesslog_filename']

    def add_filter(self, filter_pattern, is_casesensitive=True, is_regex=False, is_reverse=False):
        """
        Add filter data to the filters list.

        :param filter_pattern: string: pattern to match (plain text or regex)
        :param is_casesensitive: boolean
        :param is_regex: boolean
        :param is_reverse: boolean: True keeps lines that do NOT match
        :return:
        """
        self.__filters.append({
            'filter_pattern': filter_pattern,
            'is_casesensitive': is_casesensitive,
            'is_regex': is_regex,
            'is_reverse': is_reverse
        })

    def add_date_filter(self, minute=None, hour=None, day=None, month=None, year=None):
        """
        Set datetime filter.

        Each element defaults to the current time, resolved when this method is
        called.  (Previously the defaults evaluated ``datetime.now()`` in the
        parameter list, i.e. once at class-definition time, so every later call
        silently reused the import-time timestamp.)

        :param minute: int or '*' (defaults to the current minute)
        :param hour: int or '*' (defaults to the current hour)
        :param day: int (defaults to the current day)
        :param month: int (defaults to the current month)
        :param year: int (defaults to the current year)
        """
        now = datetime.now()
        if minute is None:
            minute = now.minute
        if hour is None:
            hour = now.hour
        if day is None:
            day = now.day
        if month is None:
            month = now.month
        if year is None:
            year = now.year
        date_filter = get_date_filter(self.__settings, minute, hour, day, month, year)
        self.add_filter(date_filter)

    def get_all_filters(self):
        """
        Return all defined filters.

        :return: list
        """
        return self.__filters

    def get_filter(self, index):
        """
        Get a filter's data by index.

        :param index: int
        :return: dict
        """
        return self.__filters[index]

    def remove_filter(self, index):
        """
        Remove one filter from the filters list using its index.

        :param index: int
        :raises IndexError: if index is out of range
        """
        # Delete by position.  The previous ``list.remove(index)`` searched by
        # value and always raised ValueError, since the list holds filter
        # dicts, never bare ints.
        del self.__filters[index]

    def clear_all_filters(self):
        """
        Clear all filters.

        :return:
        """
        self.__filters = []

    def check_all_matches(self, line, filter_patterns):
        """
        Check if line contains/matches all filter patterns.

        :param line: String
        :param filter_patterns: list of dicts as built by add_filter()
        :return: boolean (None when filter_patterns is empty)
        """
        # NOTE(review): with an empty filter list this returns None (falsy), so
        # filter_all() yields no lines at all -- confirm this is intended.
        to_return = None
        for pattern_data in filter_patterns:
            tmp_result = check_match(line=line, **pattern_data)
            to_return = tmp_result if to_return is None else (tmp_result and to_return)
        return to_return

    def filter_all(self):
        """
        Apply all defined patterns and return the filtered data.

        :return: string
        """
        to_return = ""
        if self.data:
            for line in self.data.splitlines():
                if self.check_all_matches(line, self.__filters):
                    to_return += line + "\n"
        else:
            with open(self.filepath, 'r') as file_object:
                for line in file_object:
                    if self.check_all_matches(line, self.__filters):
                        to_return += line
        return to_return

    def get_requests(self):
        """
        Analyze data (from the logs) and return the list of requests formatted
        as the model (pattern) defined in the service settings.

        :return: list of dicts, or None for an unknown service type
        """
        data = self.filter_all()
        request_pattern = self.__settings['request_model']
        date_pattern = self.__settings['date_pattern']
        date_keys = self.__settings['date_keys']
        # NOTE(review): the settings type 'web0' looks like a typo for 'web' --
        # confirm against the service settings definitions.
        if self.__settings['type'] == 'web0':
            return get_web_requests(data, request_pattern, date_pattern, date_keys)
        elif self.__settings['type'] == 'auth':
            return get_auth_requests(data, request_pattern, date_pattern, date_keys)
        else:
            return None
| ddalu5/logs-analyzer | logs_analyzer/lib.py | Python | apache-2.0 | 11,644 |
import os
from contextlib import contextmanager
from OpenSSL import crypto, SSL
import synapse.common as s_common
from synapse.tests.common import *
import synapse.lib.certdir as s_certdir
class CertDirTest(SynTest):
    '''
    Unit tests for synapse.lib.certdir: CA, host and user certificate
    generation, CSR signing, file import and user-certificate validation.
    '''

    @contextmanager
    def getCertDir(self):
        '''
        Get a test CertDir object.

        Yields:
            s_certdir.CertDir: A certdir object based out of a temp directory.
        '''
        # create a temp folder and make it a cert dir
        with self.getTestDir() as dirname:
            s_scope.set('testdir', dirname)
            cdir = s_certdir.CertDir(path=dirname)
            yield cdir

    def basic_assertions(self, cdir, cert, key, cacert=None):
        '''
        test basic certificate assumptions

        Args:
            cdir (s_certdir.CertDir): certdir object
            cert (crypto.X509): Cert to test
            key (crypto.PKey): Key for the certification
            cacert (crypto.X509): Corresponding CA cert (optional)
        '''
        self.nn(cert)
        self.nn(key)

        # Make sure the certs were generated with the expected number of bits
        self.eq(cert.get_pubkey().bits(), cdir.crypto_numbits)
        self.eq(key.bits(), cdir.crypto_numbits)

        # Make sure the certs were generated with the correct version number
        self.eq(cert.get_version(), 2)

        # ensure we can sign / verify data with our keypair
        buf = b'The quick brown fox jumps over the lazy dog.'
        sig = crypto.sign(key, buf, 'sha256')
        sig2 = crypto.sign(key, buf + b'wut', 'sha256')
        self.none(crypto.verify(cert, sig, buf, 'sha256'))
        self.raises(crypto.Error, crypto.verify, cert, sig2, buf, 'sha256')

        # ensure that a ssl context using both cert/key match
        sslcontext = SSL.Context(SSL.TLSv1_2_METHOD)
        sslcontext.use_certificate(cert)
        sslcontext.use_privatekey(key)
        self.none(sslcontext.check_privatekey())

        if cacert:
            # Make sure the cert was signed by the CA
            self.eq(cert.get_issuer().der(), cacert.get_subject().der())

            store = crypto.X509Store()
            ctx = crypto.X509StoreContext(store, cert)

            # OpenSSL should NOT be able to verify the certificate if its CA is not loaded
            store.add_cert(cert)
            self.raises(crypto.X509StoreContextError, ctx.verify_certificate)  # unable to get local issuer certificate

            # Generate a separate CA that did not sign the certificate
            try:
                cdir.genCaCert('otherca')
            except DupFileName:
                # the 'otherca' may already exist from an earlier call in the same cert dir
                pass

            # OpenSSL should NOT be able to verify the certificate if its CA is not loaded
            store.add_cert(cdir.getCaCert('otherca'))
            self.raises(crypto.X509StoreContextError, ctx.verify_certificate)  # unable to get local issuer certificate

            # OpenSSL should be able to verify the certificate, once its CA is loaded
            store.add_cert(cacert)
            self.none(ctx.verify_certificate())  # valid

    def p12_assertions(self, cdir, cert, key, p12, cacert=None):
        '''
        test basic p12 certificate bundle assumptions

        Args:
            cdir (s_certdir.CertDir): certdir object
            cert (crypto.X509): Cert to test
            key (crypto.PKey): Key for the certification
            p12 (crypto.PKCS12): PKCS12 object to test
            cacert (crypto.X509): Corresponding CA cert (optional)
        '''
        self.nn(p12)

        # Pull out the CA cert and keypair data
        p12_cacert = None
        if cacert:
            p12_cacert = p12.get_ca_certificates()
            self.nn(p12_cacert)
            self.len(1, p12_cacert)
            p12_cacert = p12_cacert[0]
            self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cacert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cacert))

        p12_cert = p12.get_certificate()
        p12_key = p12.get_privatekey()
        self.basic_assertions(cdir, p12_cert, p12_key, cacert=p12_cacert)

        # Make sure that the CA cert and keypair files are the same as the CA cert and keypair contained in the p12 file
        self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cert))
        self.eq(crypto.dump_privatekey(crypto.FILETYPE_ASN1, key), crypto.dump_privatekey(crypto.FILETYPE_ASN1, p12_key))

    def user_assertions(self, cdir, cert, key, cacert=None):
        '''
        test basic certificate assumptions for a host certificate

        Args:
            cdir (s_certdir.CertDir): certdir object
            cert (crypto.X509): Cert to test
            key (crypto.PKey): Key for the certification
            cacert (crypto.X509): Corresponding CA cert (optional)
        '''
        nextensions = cert.get_extension_count()
        exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}

        # expected extension values for a client (user) certificate
        nscertext = crypto.X509Extension(b'nsCertType', False, b'client')
        keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature')
        extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'clientAuth')
        basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
        self.eq(exts[b'nsCertType'], nscertext.get_data())
        self.eq(exts[b'keyUsage'], keyuseext.get_data())
        self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
        self.eq(exts[b'basicConstraints'], basicconext.get_data())
        # user certs carry no subjectAltName
        self.notin(b'subjectAltName', exts)

    def host_assertions(self, cdir, cert, key, cacert=None):
        '''
        test basic certificate assumptions for a host certificate

        Args:
            cdir (s_certdir.CertDir): certdir object
            cert (crypto.X509): Cert to test
            key (crypto.PKey): Key for the certification
            cacert (crypto.X509): Corresponding CA cert (optional)
        '''
        nextensions = cert.get_extension_count()
        exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}

        # expected extension values for a server (host) certificate
        nscertext = crypto.X509Extension(b'nsCertType', False, b'server')
        keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature,keyEncipherment')
        extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'serverAuth')
        basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
        self.eq(exts[b'nsCertType'], nscertext.get_data())
        self.eq(exts[b'keyUsage'], keyuseext.get_data())
        self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
        self.eq(exts[b'basicConstraints'], basicconext.get_data())
        # host certs must carry a subjectAltName
        self.isin(b'subjectAltName', exts)

    def test_certdir_cas(self):
        '''
        Generate a self-signed root CA plus an intermediate CA and validate both.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            inter_name = 'testsyn-intermed'
            base = cdir._getPathJoin()

            # Test that all the methods for loading the certificates return correct values for non-existant files
            self.none(cdir.getCaCert(caname))
            self.none(cdir.getCaKey(caname))
            self.false(cdir.isCaCert(caname))
            self.none(cdir.getCaCertPath(caname))
            self.none(cdir.getCaKeyPath(caname))

            # Generate a self-signed CA =======================================
            cdir.genCaCert(caname)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getCaCert(caname), crypto.X509)
            self.isinstance(cdir.getCaKey(caname), crypto.PKey)
            self.true(cdir.isCaCert(caname))
            self.eq(cdir.getCaCertPath(caname), base + '/cas/' + caname + '.crt')
            self.eq(cdir.getCaKeyPath(caname), base + '/cas/' + caname + '.key')

            # Run basic assertions on the CA keypair
            cacert = cdir.getCaCert(caname)
            cakey = cdir.getCaKey(caname)
            self.basic_assertions(cdir, cacert, cakey)

            # Generate intermediate CA ========================================
            cdir.genCaCert(inter_name, signas=caname)

            # Run basic assertions, make sure that it was signed by the root CA
            inter_cacert = cdir.getCaCert(inter_name)
            inter_cakey = cdir.getCaKey(inter_name)
            self.basic_assertions(cdir, inter_cacert, inter_cakey, cacert=cacert)

    def test_certdir_hosts(self):
        '''
        Generate self-signed and CA-signed host certificates and validate them.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            hostname = 'visi.vertex.link'
            hostname_unsigned = 'unsigned.vertex.link'
            base = cdir._getPathJoin()
            cdir.genCaCert(caname)
            cacert = cdir.getCaCert(caname)

            # Test that all the methods for loading the certificates return correct values for non-existant files
            self.none(cdir.getHostCert(hostname_unsigned))
            self.none(cdir.getHostKey(hostname_unsigned))
            self.false(cdir.isHostCert(hostname_unsigned))
            self.none(cdir.getHostCertPath(hostname_unsigned))
            self.none(cdir.getHostKeyPath(hostname_unsigned))
            self.none(cdir.getHostCaPath(hostname_unsigned))

            # Generate a self-signed host keypair =============================
            cdir.genHostCert(hostname_unsigned)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getHostCert(hostname_unsigned), crypto.X509)
            self.isinstance(cdir.getHostKey(hostname_unsigned), crypto.PKey)
            self.true(cdir.isHostCert(hostname_unsigned))
            self.eq(cdir.getHostCertPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.crt')
            self.eq(cdir.getHostKeyPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.key')
            self.none(cdir.getHostCaPath(hostname_unsigned))  # the cert is self-signed, so there is no ca cert

            # Run basic assertions on the host keypair
            cert = cdir.getHostCert(hostname_unsigned)
            key = cdir.getHostKey(hostname_unsigned)
            self.basic_assertions(cdir, cert, key)
            self.host_assertions(cdir, cert, key)

            # Generate a signed host keypair ==================================
            cdir.genHostCert(hostname, signas=caname)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getHostCert(hostname), crypto.X509)
            self.isinstance(cdir.getHostKey(hostname), crypto.PKey)
            self.true(cdir.isHostCert(hostname))
            self.eq(cdir.getHostCertPath(hostname), base + '/hosts/' + hostname + '.crt')
            self.eq(cdir.getHostKeyPath(hostname), base + '/hosts/' + hostname + '.key')
            self.eq(cdir.getHostCaPath(hostname), base + '/cas/' + caname + '.crt')  # the cert is signed, so there is a ca cert

            # Run basic assertions on the host keypair
            cert = cdir.getHostCert(hostname)
            key = cdir.getHostKey(hostname)
            self.basic_assertions(cdir, cert, key, cacert=cacert)
            self.host_assertions(cdir, cert, key, cacert=cacert)

    def test_certdir_users(self):
        '''
        Generate self-signed and CA-signed user certificates (plus a PKCS12
        client bundle) and validate them.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            username = 'visi@vertex.link'
            username_unsigned = 'unsigned@vertex.link'
            base = cdir._getPathJoin()
            cdir.genCaCert(caname)
            cacert = cdir.getCaCert(caname)

            # Test that all the methods for loading the certificates return correct values for non-existant files
            self.none(cdir.getUserCert(username_unsigned))
            self.none(cdir.getUserKey(username_unsigned))
            self.none(cdir.getClientCert(username_unsigned))
            self.false(cdir.isUserCert(username_unsigned))
            self.false(cdir.isClientCert(username_unsigned))
            self.none(cdir.getUserCertPath('nope'))
            self.none(cdir.getUserKeyPath('nope'))
            self.none(cdir.getUserCaPath('nope'))
            self.none(cdir.getUserForHost('nope', 'host.vertex.link'))

            # Generate a self-signed user keypair =============================
            cdir.genUserCert(username_unsigned)
            self.raises(NoSuchFile, cdir.genClientCert, username_unsigned)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getUserCert(username_unsigned), crypto.X509)
            self.isinstance(cdir.getUserKey(username_unsigned), crypto.PKey)
            self.none(cdir.getClientCert(username_unsigned))
            self.true(cdir.isUserCert(username_unsigned))
            self.false(cdir.isClientCert(username_unsigned))
            self.eq(cdir.getUserCertPath(username_unsigned), base + '/users/' + username_unsigned + '.crt')
            self.eq(cdir.getUserKeyPath(username_unsigned), base + '/users/' + username_unsigned + '.key')
            self.none(cdir.getUserCaPath(username_unsigned))  # no CA
            self.eq(cdir.getUserForHost('unsigned', 'host.vertex.link'), username_unsigned)

            # Run basic assertions on the host keypair
            cert = cdir.getUserCert(username_unsigned)
            key = cdir.getUserKey(username_unsigned)
            self.basic_assertions(cdir, cert, key)
            self.user_assertions(cdir, cert, key)

            # Generate a signed user keypair ==================================
            cdir.genUserCert(username, signas=caname)
            cdir.genClientCert(username)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getUserCert(username), crypto.X509)
            self.isinstance(cdir.getUserKey(username), crypto.PKey)
            self.isinstance(cdir.getClientCert(username), crypto.PKCS12)
            self.true(cdir.isUserCert(username))
            self.true(cdir.isClientCert(username))
            self.eq(cdir.getUserCertPath(username), base + '/users/' + username + '.crt')
            self.eq(cdir.getUserKeyPath(username), base + '/users/' + username + '.key')
            self.eq(cdir.getUserCaPath(username), base + '/cas/' + caname + '.crt')
            self.eq(cdir.getUserForHost('visi', 'host.vertex.link'), username)

            # Run basic assertions on the host keypair
            cert = cdir.getUserCert(username)
            key = cdir.getUserKey(username)
            p12 = cdir.getClientCert(username)
            self.basic_assertions(cdir, cert, key, cacert=cacert)
            self.user_assertions(cdir, cert, key, cacert=cacert)
            self.p12_assertions(cdir, cert, key, p12, cacert=cacert)

            # Test missing files for generating a client cert
            os.remove(base + '/users/' + username + '.key')
            self.raises(NoSuchFile, cdir.genClientCert, username)  # user key
            os.remove(base + '/cas/' + caname + '.crt')
            self.raises(NoSuchFile, cdir.genClientCert, username)  # ca crt
            os.remove(base + '/users/' + username + '.crt')
            self.raises(NoSuchFile, cdir.genClientCert, username)  # user crt

    def test_certdir_hosts_sans(self):
        '''
        Verify subjectAltName handling for host certs with explicit, default
        and self-signed SAN configurations.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            cdir.genCaCert(caname)

            # Host cert with multiple SANs ====================================
            hostname = 'visi.vertex.link'
            sans = 'DNS:vertex.link,DNS:visi.vertex.link,DNS:vertex.link'
            cdir.genHostCert(hostname, signas=caname, sans=sans)

            cacert = cdir.getCaCert(caname)
            cert = cdir.getHostCert(hostname)
            key = cdir.getHostKey(hostname)

            self.eq(cert.get_extension_count(), 5)
            self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
            self.eq(cert.get_extension(4).get_data(), b'0\x1f\x82\x0bvertex.link\x82\x10visi.vertex.link')  # ASN.1 encoded subjectAltName data

            # Host cert with no specified SANs ================================
            hostname = 'visi2.vertex.link'
            cdir.genHostCert(hostname, signas=caname)

            cacert = cdir.getCaCert(caname)
            cert = cdir.getHostCert(hostname)
            key = cdir.getHostKey(hostname)

            self.eq(cert.get_extension_count(), 5)
            self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
            self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi2.vertex.link')  # ASN.1 encoded subjectAltName data

            # Self-signed Host cert with no specified SANs ====================
            hostname = 'visi3.vertex.link'
            cdir.genHostCert(hostname)

            cacert = cdir.getCaCert(caname)
            cert = cdir.getHostCert(hostname)
            key = cdir.getHostKey(hostname)

            self.eq(cert.get_extension_count(), 5)
            self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
            self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi3.vertex.link')  # ASN.1 encoded subjectAltName data

    def test_certdir_hosts_csr(self):
        '''
        Generate a host CSR, sign it as the CA and validate the keypair.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            hostname = 'visi.vertex.link'

            # Generate CA cert and host CSR
            cdir.genCaCert(caname)
            cdir.genHostCsr(hostname)
            path = cdir._getPathJoin('hosts', hostname + '.csr')
            xcsr = cdir._loadCsrPath(path)

            # Sign the CSR as the CA
            pkey, pcert = cdir.signHostCsr(xcsr, caname)
            self.isinstance(pkey, crypto.PKey)
            self.isinstance(pcert, crypto.X509)

            # Validate the keypair
            cacert = cdir.getCaCert(caname)
            cert = cdir.getHostCert(hostname)
            key = cdir.getHostKey(hostname)
            self.basic_assertions(cdir, cert, key, cacert=cacert)

    def test_certdir_users_csr(self):
        '''
        Generate a user CSR, sign it as the CA and validate the keypair.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            username = 'visi@vertex.link'

            # Generate CA cert and user CSR
            cdir.genCaCert(caname)
            cdir.genUserCsr(username)
            path = cdir._getPathJoin('users', username + '.csr')
            xcsr = cdir._loadCsrPath(path)

            # Sign the CSR as the CA
            pkey, pcert = cdir.signUserCsr(xcsr, caname)
            self.isinstance(pkey, crypto.PKey)
            self.isinstance(pcert, crypto.X509)

            # Validate the keypair
            cacert = cdir.getCaCert(caname)
            cert = cdir.getUserCert(username)
            key = cdir.getUserKey(username)
            self.basic_assertions(cdir, cert, key, cacert=cacert)

    def test_certdir_importfile(self):
        '''
        Verify importFile() rejects missing/unsupported files, copies supported
        ones into place, and refuses to overwrite existing files.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            with self.getTestDir() as testpath:

                # File doesn't exist
                fpath = s_common.genpath(testpath, 'not_real.crt')
                self.raises(NoSuchFile, cdir.importFile, fpath, 'cas')

                # File has unsupported extension
                fpath = s_common.genpath(testpath, 'coolpic.bmp')
                with s_common.genfile(fpath) as fd:
                    self.raises(BadFileExt, cdir.importFile, fpath, 'cas')

                # (ftype, fname) pairs covering every supported import target
                tests = (
                    ('cas', 'coolca.crt'),
                    ('cas', 'coolca.key'),
                    ('hosts', 'coolhost.crt'),
                    ('hosts', 'coolhost.key'),
                    ('users', 'cooluser.crt'),
                    ('users', 'cooluser.key'),
                    ('users', 'cooluser.p12'),
                )
                data = b'arbitrary data'
                for ftype, fname in tests:
                    srcpath = s_common.genpath(testpath, fname)
                    dstpath = s_common.genpath(cdir.path, ftype, fname)

                    with s_common.genfile(srcpath) as fd:
                        fd.write(b'arbitrary data')
                        fd.seek(0)

                        # Make sure the file is not there
                        self.raises(NoSuchFile, s_common.reqfile, dstpath)

                        # Import it and make sure it exists
                        self.none(cdir.importFile(srcpath, ftype))
                        with s_common.reqfile(dstpath) as dstfd:
                            self.eq(dstfd.read(), b'arbitrary data')

                        # Make sure it can't be overwritten
                        self.raises(FileExists, cdir.importFile, srcpath, ftype)

    def test_certdir_valUserCert(self):
        '''
        Verify valUserCert() accepts certs signed by a known CA and rejects
        self-signed certs, garbage bytes and certs from the wrong CA.
        '''
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            base = cdir._getPathJoin()
            cdir.genCaCert('syntest')
            cdir.genCaCert('newp')
            cacerts = cdir.getCaCerts()
            syntestca = cdir.getCaCert('syntest')
            newpca = cdir.getCaCert('newp')

            # garbage bytes are not a certificate at all
            self.raises(crypto.Error, cdir.valUserCert, b'')

            # self-signed cert: no CA can vouch for it
            cdir.genUserCert('cool')
            path = cdir.getUserCertPath('cool')
            byts = cdir._getPathBytes(path)
            self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts)

            # cert signed by 'syntest' validates only against that CA
            cdir.genUserCert('cooler', signas='syntest')
            path = cdir.getUserCertPath('cooler')
            byts = cdir._getPathBytes(path)
            self.nn(cdir.valUserCert(byts))
            self.nn(cdir.valUserCert(byts, cacerts=(syntestca,)))
            self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(newpca,))
            self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())

            # cert signed by 'newp' validates only against that CA
            cdir.genUserCert('coolest', signas='newp')
            path = cdir.getUserCertPath('coolest')
            byts = cdir._getPathBytes(path)
            self.nn(cdir.valUserCert(byts))
            self.nn(cdir.valUserCert(byts, cacerts=(newpca,)))
            self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(syntestca,))
            self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())
| vivisect/synapse | synapse/tests/test_lib_certdir.py | Python | apache-2.0 | 22,334 |
/*******************************************************************************
* Copyright 2006 - 2012 Vienna University of Technology,
* Department of Software Technology and Interactive Systems, IFS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package eu.scape_project.planning.plato.wfview.full;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import javax.enterprise.context.ConversationScoped;
import javax.inject.Inject;
import javax.inject.Named;
import eu.scape_project.planning.model.Plan;
import eu.scape_project.planning.model.PlanState;
import eu.scape_project.planning.model.PolicyNode;
import eu.scape_project.planning.model.policy.PreservationCase;
import eu.scape_project.planning.plato.bean.TreeHelperBean;
import eu.scape_project.planning.plato.wf.AbstractWorkflowStep;
import eu.scape_project.planning.plato.wf.DefineBasis;
import eu.scape_project.planning.plato.wfview.AbstractView;
import eu.scape_project.planning.policies.OrganisationalPolicies;
/**
* Bean for the viewWorkflow step 'Define Basis'.
*/
@Named("defineBasis")
@ConversationScoped
public class DefineBasisView extends AbstractView implements Serializable {
    private static final long serialVersionUID = 8237053627553012469L;

    /** Workflow-step business logic backing this view. */
    @Inject
    private DefineBasis defineBasis;

    /** Helper controlling the expansion state of the displayed policy tree. */
    @Inject
    private TreeHelperBean treeHelper;

    /** Source of organisational policies and their preservation cases. */
    @Inject
    private OrganisationalPolicies policies;

    /** Preservation case currently chosen by the user, or null when none is selected. */
    private PreservationCase selectedPreservationCase;

    /** All preservation cases offered for selection. */
    private List<PreservationCase> preservationCases;

    /**
     * Creates the view and registers its workflow metadata.
     */
    public DefineBasisView() {
        currentPlanState = PlanState.INITIALISED;
        name = "Define Basis";
        viewUrl = "/plan/definebasis.jsf";
        group = "menu.defineRequirements";
    }

    /**
     * Initializes the 'Define Basis' viewWorkflow step: loads the available
     * preservation cases, restores the plan's current selection and expands
     * the policy tree.
     *
     * @see AbstractView#init()
     */
    public void init(Plan plan) {
        super.init(plan);
        policies.init();
        preservationCases = policies.getPreservationCases();
        selectedPreservationCase = policies.getPreservationCase(plan.getProjectBasis().getSelectedPreservationCaseURI());
        // expand all nodes of the displayed policy-tree (if existent)
        treeHelper.expandAll(plan.getProjectBasis().getPolicyTree().getRoot());
    }

    @Override
    protected AbstractWorkflowStep getWfStep() {
        return defineBasis;
    }

    /**
     * Returns the policy tree wrapped in a list, as required by
     * rich:treeModelRecursiveAdaptor (that richfaces component expects a list
     * of nodes).
     *
     * @return policy tree in list representation, empty when no tree exists
     */
    public List<PolicyNode> getPolicyRoot() {
        List<PolicyNode> roots = new ArrayList<PolicyNode>();
        if (plan.getProjectBasis().getPolicyTree() != null) {
            roots.add(plan.getProjectBasis().getPolicyTree().getRoot());
        }
        return roots;
    }

    // ---------- getter/setter ----------

    public OrganisationalPolicies getPolicies() {
        return policies;
    }

    public void setPolicies(OrganisationalPolicies policies) {
        this.policies = policies;
    }

    /**
     * @return the name of the currently selected preservation case, or null
     */
    public String getSelectedPreservationCaseName() {
        return selectedPreservationCase == null ? null : selectedPreservationCase.getName();
    }

    /**
     * Selects the preservation case whose name equals the given one; the
     * selection is cleared when no case matches.
     */
    public void setSelectedPreservationCaseName(String name) {
        selectedPreservationCase = null;
        for (PreservationCase candidate : preservationCases) {
            if (candidate.getName().equals(name)) {
                selectedPreservationCase = candidate;
            }
        }
    }

    /**
     * Applies the currently selected preservation case to the plan's project
     * basis; does nothing when no case is selected.
     */
    public void useSelectedPreservationCase() {
        if (selectedPreservationCase == null) {
            return;
        }
        plan.getProjectBasis().applyPreservationCase(selectedPreservationCase);
    }

    public TreeHelperBean getTreeHelper() {
        return treeHelper;
    }

    public List<PreservationCase> getPreservationCases() {
        return preservationCases;
    }

    public PreservationCase getSelectedPreservationCase() {
        return selectedPreservationCase;
    }
}
| openpreserve/plato | plato/src/main/java/eu/scape_project/planning/plato/wfview/full/DefineBasisView.java | Java | apache-2.0 | 5,051 |
/*
* Copyright (C) 2017 Bilibili
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.bilibili.boxing_impl.ui;
import android.content.Intent;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentStatePagerAdapter;
import android.support.v4.view.ViewPager;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.Button;
import android.widget.ProgressBar;
import android.widget.Toast;
import com.bilibili.boxing.AbsBoxingViewActivity;
import com.bilibili.boxing.Boxing;
import com.bilibili.boxing.model.BoxingManager;
import com.bilibili.boxing.model.entity.BaseMedia;
import com.bilibili.boxing.model.entity.impl.ImageMedia;
import com.bilibili.boxing.model.task.IMediaTask;
import com.bilibili.boxing_impl.BoxingResHelper;
import com.bilibili.boxing_impl.R;
import com.bilibili.boxing_impl.view.HackyViewPager;
import java.util.ArrayList;
import java.util.List;
/**
* An Activity to show raw image by holding {@link BoxingViewFragment}.
*
* @author ChenSL
*/
public class BoxingViewActivity extends AbsBoxingViewActivity {
    // Result extra: true when the user left via back navigation, false when
    // the OK button confirmed the selection (see finishByBackPressed()).
    public static final String EXTRA_TYPE_BACK = "com.bilibili.boxing_impl.ui.BoxingViewActivity.type_back";
    // Pager hosting one BoxingRawImageFragment per image.
    HackyViewPager mGallery;
    // Spinner shown while the start position has not been loaded yet.
    ProgressBar mProgressBar;
    // True when the selection UI (OK button, checked menu icon) is enabled.
    private boolean mNeedEdit;
    // True when images must be paged in from the album instead of being
    // taken directly from the already-selected list.
    private boolean mNeedLoading;
    // Set once the pager has been positioned after the first load.
    private boolean mFinishLoading;
    // Guards the one-time initialisation of the "pos/total" toolbar title.
    private boolean mNeedAllCount = true;
    // Next album page index to request from the loader.
    private int mCurrentPage;
    // Total image count reported by the loader.
    private int mTotalCount;
    // Initial pager position requested by the caller.
    private int mStartPos;
    // Running counter used for the toolbar title while loading.
    private int mPos;
    // Maximum number of images the user may select.
    private int mMaxCount;
    private String mAlbumId;
    private Toolbar mToolbar;
    private ImagesAdapter mAdapter;
    // Image currently displayed in the pager.
    private ImageMedia mCurrentImageItem;
    private Button mOkBtn;
    // All images shown in the pager.
    private ArrayList<BaseMedia> mImages;
    // Images currently selected by the user.
    private ArrayList<BaseMedia> mSelectedImages;
    private MenuItem mSelectedMenuItem;
    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_boxing_view);
        createToolbar();
        initData();
        initView();
        startLoading();
    }
    // Install the toolbar; the up arrow behaves exactly like back-press.
    private void createToolbar() {
        mToolbar = (Toolbar) findViewById(R.id.nav_top_bar);
        setSupportActionBar(mToolbar);
        getSupportActionBar().setDisplayHomeAsUpEnabled(true);
        mToolbar.setNavigationOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                onBackPressed();
            }
        });
        getSupportActionBar().setDisplayShowTitleEnabled(false);
    }
    // Pull configuration and selection state handed over by the caller.
    private void initData() {
        mSelectedImages = getSelectedImages();
        mAlbumId = getAlbumId();
        mStartPos = getStartPos();
        mNeedLoading = BoxingManager.getInstance().getBoxingConfig().isNeedLoading();
        mNeedEdit = BoxingManager.getInstance().getBoxingConfig().isNeedEdit();
        mMaxCount = getMaxCount();
        mImages = new ArrayList<>();
        // Without paging, the pager shows exactly the already-selected images.
        if (!mNeedLoading && mSelectedImages != null) {
            mImages.addAll(mSelectedImages);
        }
    }
    // Wire up the pager, the OK button and the progress spinner.
    private void initView() {
        mAdapter = new ImagesAdapter(getSupportFragmentManager());
        mOkBtn = (Button) findViewById(R.id.image_items_ok);
        mGallery = (HackyViewPager) findViewById(R.id.pager);
        mProgressBar = (ProgressBar) findViewById(R.id.loading);
        mGallery.setAdapter(mAdapter);
        mGallery.addOnPageChangeListener(new OnPagerChangeListener());
        if (!mNeedEdit) {
            // Pure preview mode: hide the selection controls entirely.
            View chooseLayout = findViewById(R.id.item_choose_layout);
            chooseLayout.setVisibility(View.GONE);
        } else {
            setOkTextNumber();
            mOkBtn.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    // OK button: return the selection, not a back-press.
                    finishByBackPressed(false);
                }
            });
        }
    }
    // Refresh the OK button label ("selected/max") and its enabled state.
    private void setOkTextNumber() {
        if (mNeedEdit) {
            int selectedSize = mSelectedImages.size();
            // NOTE(review): uses max(selected, mMaxCount) as the denominator,
            // so the shown limit never drops below the current selection size.
            int size = Math.max(mSelectedImages.size(), mMaxCount);
            mOkBtn.setText(getString(R.string.boxing_image_preview_ok_fmt, String.valueOf(selectedSize)
                    , String.valueOf(size)));
            mOkBtn.setEnabled(selectedSize > 0);
        }
    }
    // Return the current selection to the caller and close this activity.
    // value is stored under EXTRA_TYPE_BACK (true = closed via back key).
    private void finishByBackPressed(boolean value) {
        Intent intent = new Intent();
        intent.putParcelableArrayListExtra(Boxing.EXTRA_SELECTED_MEDIA, mSelectedImages);
        intent.putExtra(EXTRA_TYPE_BACK, value);
        setResult(RESULT_OK, intent);
        finish();
    }
    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        super.onCreateOptionsMenu(menu);
        // The checked/unchecked menu item only exists in edit mode.
        if (mNeedEdit) {
            getMenuInflater().inflate(R.menu.activity_boxing_image_viewer, menu);
            mSelectedMenuItem = menu.findItem(R.id.menu_image_item_selected);
            if (mCurrentImageItem != null) {
                setMenuIcon(mCurrentImageItem.isSelected());
            } else {
                setMenuIcon(false);
            }
            return true;
        }
        return false;
    }
    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        int id = item.getItemId();
        if (id == R.id.menu_image_item_selected) {
            if (mCurrentImageItem == null) {
                return false;
            }
            // Refuse to select past the configured maximum.
            if (mSelectedImages.size() >= mMaxCount && !mCurrentImageItem.isSelected()) {
                String warning = getString(R.string.boxing_max_image_over_fmt, mMaxCount);
                Toast.makeText(this, warning, Toast.LENGTH_SHORT).show();
                return true;
            }
            if (mCurrentImageItem.isSelected()) {
                cancelImage();
            } else {
                if (!mSelectedImages.contains(mCurrentImageItem)) {
                    // Oversized GIFs cannot be selected at all.
                    if (mCurrentImageItem.isGifOverSize()) {
                        Toast.makeText(getApplicationContext(), R.string.boxing_gif_too_big, Toast.LENGTH_SHORT).show();
                        return true;
                    }
                    mCurrentImageItem.setSelected(true);
                    mSelectedImages.add(mCurrentImageItem);
                }
            }
            setOkTextNumber();
            setMenuIcon(mCurrentImageItem.isSelected());
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
    // Deselect the current image and drop it from the selection list.
    private void cancelImage() {
        if (mSelectedImages.contains(mCurrentImageItem)) {
            mSelectedImages.remove(mCurrentImageItem);
        }
        mCurrentImageItem.setSelected(false);
    }
    // Swap the menu icon between the checked and unchecked resource.
    private void setMenuIcon(boolean isSelected) {
        if (mNeedEdit) {
            mSelectedMenuItem.setIcon(isSelected ? BoxingResHelper.getMediaCheckedRes(): BoxingResHelper.getMediaUncheckedRes());
        }
    }
    @Override
    public void startLoading() {
        if (!mNeedLoading) {
            // No paging: display the selected images immediately.
            mCurrentImageItem = (ImageMedia) mSelectedImages.get(mStartPos);
            if (mStartPos > 0 && mStartPos < mSelectedImages.size()) {
                mGallery.setCurrentItem(mStartPos, false);
            }
            mToolbar.setTitle(getString(R.string.boxing_image_preview_title_fmt, String.valueOf(mStartPos + 1)
                    , String.valueOf(mSelectedImages.size())));
            mProgressBar.setVisibility(View.GONE);
            mGallery.setVisibility(View.VISIBLE);
            mAdapter.setMedias(mImages);
        } else {
            // Kick off page-by-page loading from the album.
            loadMedia(mAlbumId, mStartPos, mCurrentPage);
            mAdapter.setMedias(mImages);
        }
    }
    // Request one page of the album; results arrive via showMedia().
    private void loadMedia(String albumId, int startPos, int page) {
        this.mPos = startPos;
        loadMedias(page, albumId);
    }
    @Override
    public void showMedia(@Nullable List<BaseMedia> medias, int totalCount) {
        if (medias == null || totalCount <= 0) {
            return;
        }
        mImages.addAll(medias);
        mAdapter.notifyDataSetChanged();
        // Mark items that were already selected before entering the viewer.
        checkSelectedMedia(mImages, mSelectedImages);
        setupGallery();
        if (mToolbar != null && mNeedAllCount) {
            // Initialise the "pos/total" title exactly once.
            mToolbar.setTitle(getString(R.string.boxing_image_preview_title_fmt,
                    String.valueOf(++mPos), String.valueOf(totalCount)));
            mNeedAllCount = false;
        }
        loadOtherPagesInAlbum(totalCount);
    }
    // Show the pager once the requested start position has been loaded;
    // keep the spinner up while it is still beyond the loaded range.
    private void setupGallery() {
        int startPos = mStartPos;
        if (mGallery == null || startPos < 0) {
            return;
        }
        if (startPos < mImages.size() && !mFinishLoading) {
            mGallery.setCurrentItem(mStartPos, false);
            mCurrentImageItem = (ImageMedia) mImages.get(startPos);
            mProgressBar.setVisibility(View.GONE);
            mGallery.setVisibility(View.VISIBLE);
            mFinishLoading = true;
            // Refresh the checked-state icon for the now-current image.
            invalidateOptionsMenu();
        } else if (startPos >= mImages.size()) {
            mProgressBar.setVisibility(View.VISIBLE);
            mGallery.setVisibility(View.GONE);
        }
    }
    // Keep requesting further pages until the whole album is loaded.
    private void loadOtherPagesInAlbum(int totalCount) {
        mTotalCount = totalCount;
        if (mCurrentPage <= (mTotalCount / IMediaTask.PAGE_LIMIT)) {
            mCurrentPage++;
            loadMedia(mAlbumId, mStartPos, mCurrentPage);
        }
    }
    @Override
    protected void onSaveInstanceState(Bundle outState) {
        // Preserve the selection and album across configuration changes.
        if (mSelectedImages != null) {
            outState.putParcelableArrayList(Boxing.EXTRA_SELECTED_MEDIA, mSelectedImages);
        }
        outState.putString(Boxing.EXTRA_ALBUM_ID, mAlbumId);
        super.onSaveInstanceState(outState);
    }
    @Override
    public void onBackPressed() {
        finishByBackPressed(true);
    }
    // Adapter producing one raw-image fragment per media item.
    private class ImagesAdapter extends FragmentStatePagerAdapter {
        private ArrayList<BaseMedia> mMedias;
        ImagesAdapter(FragmentManager fm) {
            super(fm);
        }
        @Override
        public Fragment getItem(int i) {
            return BoxingRawImageFragment.newInstance((ImageMedia) mMedias.get(i));
        }
        @Override
        public int getCount() {
            return mMedias == null ? 0 : mMedias.size();
        }
        public void setMedias(ArrayList<BaseMedia> medias) {
            this.mMedias = medias;
            notifyDataSetChanged();
        }
    }
    // Keeps the title and the current image in sync with the visible page.
    private class OnPagerChangeListener extends ViewPager.SimpleOnPageChangeListener {
        @Override
        public void onPageSelected(int position) {
            if (mToolbar != null && position < mImages.size()) {
                mToolbar.setTitle(getString(R.string.boxing_image_preview_title_fmt, String.valueOf(position + 1)
                        , mNeedLoading ? String.valueOf(mTotalCount) : String.valueOf(mImages.size())));
                mCurrentImageItem = (ImageMedia) mImages.get(position);
                invalidateOptionsMenu();
            }
        }
    }
}
| xiyangyuge/boxing | boxing-impl/src/main/java/com/bilibili/boxing_impl/ui/BoxingViewActivity.java | Java | apache-2.0 | 11,625 |
#include "wormhole_predefine.h"
#include "wormhole_proxyprocess.h"
#include "wormhole_application.h"
#include "wormhole_stat_define.h"
//===================================================================================================
// Base proxy strategy: derived classes implement the actual routing.
Interface_WH_Proxy::Interface_WH_Proxy()
{
}
// Destructor body; presumably declared virtual in the header (TODO confirm)
// so derived proxies destroy cleanly through a base pointer.
Interface_WH_Proxy::~Interface_WH_Proxy()
{
}
// Map a configuration string (case-insensitive) to its proxy type,
// returning INVALID_PROXY_TYPE for anything unrecognised.
Interface_WH_Proxy::PROXY_TYPE Interface_WH_Proxy::str_to_proxytype(const char *str_proxy)
{
    // Table-driven replacement for the if/else-if chain.
    struct NameTypePair
    {
        const char *name_;
        PROXY_TYPE type_;
    };
    static const NameTypePair NAME_TO_TYPE[] =
    {
        { "ECHO",             PROXY_TYPE_ECHO },
        { "TRANSMIT",         PROXY_TYPE_TRANSMIT },
        { "BROADCAST",        PROXY_TYPE_BROADCAST },
        { "MODULO_UID",       PROXY_TYPE_MODULO_UID },
        { "MODULO_SENDSVCID", PROXY_TYPE_MODULO_SENDSVCID },
    };
    const size_t TABLE_SIZE = sizeof(NAME_TO_TYPE) / sizeof(NAME_TO_TYPE[0]);
    for (size_t i = 0; i < TABLE_SIZE; ++i)
    {
        // strcasecmp keeps the comparison case-insensitive, as before.
        if (0 == strcasecmp(str_proxy, NAME_TO_TYPE[i].name_))
        {
            return NAME_TO_TYPE[i].type_;
        }
    }
    return INVALID_PROXY_TYPE;
}
// Factory for proxy strategy objects. Returns a heap-allocated proxy the
// caller owns, or NULL when the type is not a known PROXY_TYPE value.
Interface_WH_Proxy *Interface_WH_Proxy::create_proxy_factory(PROXY_TYPE proxytype)
{
    ZCE_LOG(RS_INFO, "Interface_Proxy_Process::CreatePorxyFactory PROXY_TYPE: %d.", proxytype);
    Interface_WH_Proxy *new_proxy = NULL;
    if (PROXY_TYPE_ECHO == proxytype)
    {
        // Echo server: bounce every frame straight back to its sender.
        new_proxy = new Echo_Proxy_Process();
    }
    else if (PROXY_TYPE_TRANSMIT == proxytype)
    {
        // Pass-through forwarding without touching the frame.
        new_proxy = new Transmit_Proxy();
    }
    else if (PROXY_TYPE_BROADCAST == proxytype)
    {
        // Duplicate each frame to every configured target.
        new_proxy = new Broadcast_ProxyProcess();
    }
    else if (PROXY_TYPE_MODULO_UID == proxytype)
    {
        // DB-proxy style routing: target chosen by uid modulo.
        new_proxy = new Modulo_ProxyProcess(Modulo_ProxyProcess::MODULO_UID);
    }
    else if (PROXY_TYPE_MODULO_SENDSVCID == proxytype)
    {
        // DB-proxy style routing: target chosen by sender service id modulo.
        new_proxy = new Modulo_ProxyProcess(Modulo_ProxyProcess::MODULO_SENDSVC_ID);
    }
    else
    {
        ZCE_LOG(RS_ERROR, "Error Proxy Type define. Please check you code. ");
        return NULL;
    }
    return new_proxy;
}
// One-time initialisation shared by all proxy strategies: bind to the
// process-wide MMAP bus pipe singleton used for frame I/O. Returns 0.
int Interface_WH_Proxy::init_proxy_instance()
{
    zerg_mmap_pipe_ = Soar_MMAP_BusPipe::instance();
    return 0;
}
// Read configuration. The base class has nothing to read; the parameter is
// referenced only to keep strict builds warning-free. Always returns 0.
int Interface_WH_Proxy::get_proxy_config(const ZCE_Conf_PropertyTree *conf_tree)
{
    ZCE_UNUSED_ARG(conf_tree);
    return 0;
}
//===================================================================================================
// Echo proxy: every received frame is sent straight back to its sender.
Echo_Proxy_Process::Echo_Proxy_Process()
{
}
Echo_Proxy_Process::~Echo_Proxy_Process()
{
}
// Read the echo proxy's configuration. Only the common base-class options
// apply, so this simply delegates (0 on success, error code otherwise).
int Echo_Proxy_Process::get_proxy_config(const ZCE_Conf_PropertyTree *conf_tree)
{
    return Interface_WH_Proxy::get_proxy_config(conf_tree);
}
// Echo processing: swap the frame's sender/receiver and push it back out.
// Returns 0 on success, or ERR_PROXY_SEND_PIPE_IS_FULL when the send pipe
// cannot accept the frame.
int Echo_Proxy_Process::process_proxy(Zerg_App_Frame *proc_frame)
{
    ZCE_LOGMSG_DEBUG(RS_DEBUG, "Receive a echo frame to process,"
                     "send svr:[%u|%u], "
                     "recv svr:[%u|%u], "
                     "frame_uin:%u, "
                     "frame_cmd:%u, "
                     "frame_len:%u. ",
                     proc_frame->send_service_.services_type_,
                     proc_frame->send_service_.services_id_,
                     proc_frame->recv_service_.services_type_,
                     proc_frame->recv_service_.services_id_,
                     proc_frame->frame_uid_,
                     proc_frame->frame_command_,
                     proc_frame->frame_length_);
    // Internal housekeeping commands are consumed here, nothing to echo.
    bool snd_err_flag = false;
    if (proc_frame->is_internal_process(snd_err_flag))
    {
        ZCE_LOG(RS_DEBUG, "Receive a internal command, frame_uin:%u, frame_command:%u. ",
                proc_frame->frame_uid_, proc_frame->frame_command_);
        return 0;
    }
    // Swap sender and receiver so the frame is addressed back to its origin.
    proc_frame->exchange_rcvsnd_svcid();
    if (0 != zerg_mmap_pipe_->push_back_sendpipe(proc_frame))
    {
        return SOAR_RET::ERR_PROXY_SEND_PIPE_IS_FULL;
    }
    ZCE_LOGMSG_DEBUG(RS_DEBUG, "Echo to [%u|%u], frame_uin:%u, frame_command:%u, frame_len:%u. ",
                     proc_frame->recv_service_.services_type_,
                     proc_frame->recv_service_.services_id_,
                     proc_frame->frame_uid_,
                     proc_frame->frame_command_,
                     proc_frame->frame_length_);
    return 0;
}
//===================================================================================================
// Pass-through proxy: forwards frames without modifying them at all.
Transmit_Proxy::Transmit_Proxy()
{
}
Transmit_Proxy::~Transmit_Proxy()
{
}
// Read the transmit proxy's configuration. Only the common base-class
// options apply, so this simply delegates (0 on success, error otherwise).
int Transmit_Proxy::get_proxy_config(const ZCE_Conf_PropertyTree *conf_tree)
{
    return Interface_WH_Proxy::get_proxy_config(conf_tree);
}
// Transmit processing: forward the frame unchanged to its receiver.
// Returns 0 on success, or ERR_PROXY_SEND_PIPE_IS_FULL when the send pipe
// cannot accept the frame.
int Transmit_Proxy::process_proxy(Zerg_App_Frame *proc_frame)
{
    ZCE_LOGMSG_DEBUG(RS_DEBUG, "Receive a transmit frame to process,"
                     "send svr:[%u|%u], "
                     "recv svr:[%u|%u], "
                     "frame_uin:%u, "
                     "frame_cmd:%u, "
                     "frame_len:%u. ",
                     proc_frame->send_service_.services_type_,
                     proc_frame->send_service_.services_id_,
                     proc_frame->recv_service_.services_type_,
                     proc_frame->recv_service_.services_id_,
                     proc_frame->frame_uid_,
                     proc_frame->frame_command_,
                     proc_frame->frame_length_);
    // Internal housekeeping commands are consumed here, never forwarded.
    bool snd_err_flag = false;
    if (proc_frame->is_internal_process(snd_err_flag))
    {
        ZCE_LOG(RS_DEBUG, "Receive a internal command, frame_uin:%u, frame_command:%u. ",
                proc_frame->frame_uid_, proc_frame->frame_command_);
        return 0;
    }
    if (0 != zerg_mmap_pipe_->push_back_sendpipe(proc_frame))
    {
        return SOAR_RET::ERR_PROXY_SEND_PIPE_IS_FULL;
    }
    ZCE_LOGMSG_DEBUG(RS_DEBUG, "Transmit to [%u|%u], frame_uin:%u, frame_command:%u, frame_len:%u, trans_id[%u]. ",
                     proc_frame->recv_service_.services_type_,
                     proc_frame->recv_service_.services_id_,
                     proc_frame->frame_uid_,
                     proc_frame->frame_command_,
                     proc_frame->frame_length_,
                     proc_frame->transaction_id_);
    return 0;
}
//===================================================================================================
// Broadcast proxy: duplicates every frame to all configured service IDs.
Broadcast_ProxyProcess::Broadcast_ProxyProcess() :
    Interface_WH_Proxy(),
    broadcast_svctype_(0),
    broadcast_svcnum_(0)
{
    // Clear the whole target-ID table; get_proxy_config() fills it in.
    memset(broadcast_svcid_, 0, sizeof(broadcast_svcid_));
}
Broadcast_ProxyProcess::~Broadcast_ProxyProcess()
{
}
// Read the broadcast configuration: the target service type, the number of
// targets, and one BROADCAST_SVCID_<n> entry per target (keys start at 1).
// Rejects duplicate target IDs. Returns 0 on success.
int Broadcast_ProxyProcess::get_proxy_config(const ZCE_Conf_PropertyTree *conf_tree)
{
    // Base-class options first.
    int ret = Interface_WH_Proxy::get_proxy_config(conf_tree);
    if (0 != ret)
    {
        return ret;
    }
    ret = conf_tree->path_get_leaf("BROADCAST_CFG", "BROADCAST_SVCTYPE",
                                   broadcast_svctype_);
    if (0 != ret || SERVICES_ID::INVALID_SERVICES_TYPE == broadcast_svctype_)
    {
        SOAR_CFG_READ_FAIL(RS_ERROR);
        return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
    }
    ret = conf_tree->path_get_leaf("BROADCAST_CFG", "BROADCAST_NUM",
                                   broadcast_svcnum_);
    ZCE_LOG(RS_DEBUG, "Broadcast service num: %u.", broadcast_svcnum_);
    if (0 != ret || 0 == broadcast_svcnum_ || broadcast_svcnum_ > MAX_NUM_COPY_SVC)
    {
        SOAR_CFG_READ_FAIL(RS_ERROR);
        return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
    }
    // Read each target ID (sequence keys start from 1) and collect them for
    // the duplicate check below.
    std::list<uint32_t> check_list;
    for (size_t idx = 0; idx < broadcast_svcnum_; ++idx)
    {
        ret = conf_tree->pathseq_get_leaf("BROADCAST_CFG", "BROADCAST_SVCID_", idx + 1, broadcast_svcid_[idx]);
        ZCE_LOG(RS_DEBUG, "Broadcast service id: %hu.%u.", broadcast_svctype_, broadcast_svcid_[idx]);
        if (0 != ret)
        {
            SOAR_CFG_READ_FAIL(RS_ERROR);
            return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
        }
        check_list.push_back(broadcast_svcid_[idx]);
    }
    // sort()+unique() shrinks the list when two configured IDs collide.
    check_list.sort();
    check_list.unique();
    if (check_list.size() != broadcast_svcnum_)
    {
        ZCE_LOG(RS_ERROR, "Cfg file have repeat svc id,Please check.");
        SOAR_CFG_READ_FAIL(RS_ERROR);
        return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
    }
    return 0;
}
//
// Fan a frame out: push one re-addressed copy per configured target.
// Returns 0 on success, an error code on a type mismatch or pipe failure.
int Broadcast_ProxyProcess::process_proxy(Zerg_App_Frame *proc_frame)
{
    // Dump the frame head for debugging.
    proc_frame->dumpoutput_framehead("[FROM RECV FRAME]", RS_DEBUG);
    // Internal housekeeping commands are consumed here, not broadcast.
    bool snd_err_flag = false;
    if (proc_frame->is_internal_process(snd_err_flag))
    {
        ZCE_LOG(RS_DEBUG, "Receive a internal command, frame_uin:%u, frame_command:%u. ",
                proc_frame->frame_uid_, proc_frame->frame_command_);
        return 0;
    }
    // Only frames addressed to the configured broadcast type are accepted.
    if (proc_frame->recv_service_.services_type_ != broadcast_svctype_)
    {
        ZCE_LOG(RS_ERROR, "Can't Porcess services_type_%u. ", proc_frame->recv_service_.services_type_);
        return SOAR_RET::ERR_PROXY_RCVSVC_TYPE_ERROR;
    }
    for (size_t copy_idx = 0; copy_idx < broadcast_svcnum_; ++copy_idx)
    {
        // Re-address the frame to the next target and push a copy.
        proc_frame->recv_service_.services_id_ = broadcast_svcid_[copy_idx];
        int push_ret = zerg_mmap_pipe_->push_back_sendpipe(proc_frame);
        if (0 != push_ret)
        {
            return push_ret;
        }
        ZCE_LOGMSG_DEBUG(RS_DEBUG, "Copy to [%u|%u], frame_uin:%u, frame_command:%u, frame_len:%u, trans_id[%u]. ",
                         proc_frame->recv_service_.services_type_,
                         proc_frame->recv_service_.services_id_,
                         proc_frame->frame_uid_,
                         proc_frame->frame_command_,
                         proc_frame->frame_length_,
                         proc_frame->transaction_id_);
    }
    return 0;
}
//===================================================================================================
// Modulo proxy constructor.
// BUGFIX: unlike the parallel Broadcast_ProxyProcess constructor, the
// original left modulo_svctype_ and modulo_svcnum_ uninitialized;
// get_proxy_config() compares modulo_svctype_ after a possibly-failed
// read, which could touch indeterminate values. Initialise both to 0.
Modulo_ProxyProcess::Modulo_ProxyProcess(MODULO_TYPE modulo_type) :
    Interface_WH_Proxy(),
    modulo_type_(modulo_type)
{
    modulo_svctype_ = 0;
    modulo_svcnum_ = 0;
    // Clear the whole target-ID table; get_proxy_config() fills it in.
    memset(modulo_svcid_, 0, sizeof(modulo_svcid_));
}
// No dynamically-owned resources to release.
Modulo_ProxyProcess::~Modulo_ProxyProcess()
{
}
// Read the modulo-routing configuration: the target service type, the
// number of backend instances, and one MODULO_SVCID_<n> entry per instance
// (keys start at 1). Rejects duplicate IDs. Returns 0 on success.
// BUGFIX: the per-ID debug log said "Broadcast service id" (copy-paste from
// Broadcast_ProxyProcess); it now correctly says "Modulo service id".
int Modulo_ProxyProcess::get_proxy_config(const ZCE_Conf_PropertyTree *conf_tree)
{
    // Base-class options first.
    int ret = Interface_WH_Proxy::get_proxy_config(conf_tree);
    if (ret != 0)
    {
        return ret;
    }
    ret = conf_tree->path_get_leaf("MODULO_CFG", "MODULO_SVCTYPE",
                                   modulo_svctype_);
    if (0 != ret || modulo_svctype_ == SERVICES_ID::INVALID_SERVICES_TYPE)
    {
        SOAR_CFG_READ_FAIL(RS_ERROR);
        return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
    }
    ret = conf_tree->path_get_leaf("MODULO_CFG", "MODULO_NUM",
                                   modulo_svcnum_);
    ZCE_LOG(RS_DEBUG, "Modulo service num: %u.", modulo_svcnum_);
    if (0 != ret || modulo_svcnum_ == 0 || modulo_svcnum_ > MAX_NUM_MODULO_SVC)
    {
        SOAR_CFG_READ_FAIL(RS_ERROR);
        return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
    }
    // Read each backend ID (sequence keys start from 1) and collect them
    // for the duplicate check below.
    std::list<uint32_t> check_list;
    for (size_t i = 0; i < modulo_svcnum_; ++i)
    {
        ret = conf_tree->pathseq_get_leaf("MODULO_CFG", "MODULO_SVCID_", i + 1, modulo_svcid_[i]);
        ZCE_LOG(RS_DEBUG, "Modulo service id: %hu.%u.", modulo_svctype_, modulo_svcid_[i]);
        if (0 != ret)
        {
            SOAR_CFG_READ_FAIL(RS_ERROR);
            return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
        }
        check_list.push_back(modulo_svcid_[i]);
    }
    // sort()+unique() shrinks the list when two configured IDs collide.
    check_list.sort();
    check_list.unique();
    if (check_list.size() != modulo_svcnum_)
    {
        ZCE_LOG(RS_ERROR, "Cfg file have repeat svc id,Please check.");
        SOAR_CFG_READ_FAIL(RS_ERROR);
        return SOAR_RET::ERROR_GET_CFGFILE_CONFIG_FAIL;
    }
    return 0;
}
//
// Route one frame to a backend instance chosen by modulo hashing. The
// modulo key is the frame uid (MODULO_UID) or the sender's service id
// (MODULO_SENDSVC_ID), as selected at construction time. Returns 0 on
// success, or an error code on a type mismatch / pipe failure.
// BUGFIX: the unreachable type branch asserted ZCE_ASSERT(true), which can
// never fire; it now asserts false so an invalid modulo_type_ is caught in
// debug builds. The success log also said "Copy to" (copy-paste from the
// broadcast proxy); it now says "Modulo to".
int Modulo_ProxyProcess::process_proxy(Zerg_App_Frame *proc_frame)
{
    int ret = 0;
    // Dump the frame head for debugging.
    proc_frame->dumpoutput_framehead("[FROM RECV FRAME]", RS_DEBUG);
    // Internal housekeeping commands are consumed here, never forwarded.
    bool bsnderr = false;
    if (proc_frame->is_internal_process(bsnderr) == true)
    {
        ZCE_LOG(RS_DEBUG, "Receive a internal command, frame_uin:%u, frame_command:%u. ",
                proc_frame->frame_uid_, proc_frame->frame_command_);
        return 0;
    }
    // Only frames addressed to the configured service type can be routed.
    if (proc_frame->recv_service_.services_type_ != modulo_svctype_)
    {
        ZCE_LOG(RS_ERROR, "Can't Porcess services_type_%u. ", proc_frame->recv_service_.services_type_);
        return SOAR_RET::ERR_PROXY_RCVSVC_TYPE_ERROR;
    }
    uint32_t mod_number = 0;
    if ( MODULO_UID == modulo_type_)
    {
        mod_number = proc_frame->frame_uid_;
    }
    else if (MODULO_SENDSVC_ID == modulo_type_)
    {
        mod_number = proc_frame->send_service_.services_id_;
    }
    else
    {
        // Unreachable for a correctly-constructed instance.
        ZCE_ASSERT(false);
    }
    // Pick the backend instance; get_proxy_config() guarantees that
    // modulo_svcnum_ > 0, so the modulo is safe.
    proc_frame->recv_service_.services_id_ = modulo_svcid_[mod_number % modulo_svcnum_];
    ret = zerg_mmap_pipe_->push_back_sendpipe(proc_frame);
    if (ret != 0)
    {
        return ret;
    }
    ZCE_LOGMSG_DEBUG(RS_DEBUG, "Modulo to [%u|%u], frame_uin:%u, frame_command:%u, frame_len:%u, trans_id[%u]. ",
                     proc_frame->recv_service_.services_type_,
                     proc_frame->recv_service_.services_id_,
                     proc_frame->frame_uid_,
                     proc_frame->frame_command_,
                     proc_frame->frame_length_,
                     proc_frame->transaction_id_);
    return 0;
}
//const DBModalMGRouteItem *DBModalMGProxyInfo::find_route(unsigned int uid)
//{
// if (route_cfg_.size() == 0)
// {
// // 没有配置路由,这一定是个错误
// ZCE_LOG(RS_ERROR,"[%s] no route configed", __ZCE_FUNC__);
// return NULL;
// }
//
// DBModalMGRouteItem tmp;
// // 取uid的低16位作为hash值,应该够随机的
// tmp.hash_ = (uid & 0xFFFF);
//
// std::vector<DBModalMGRouteItem>::iterator iter
// = std::upper_bound(route_cfg_.begin(), route_cfg_.end(), tmp);
//
// // 如果指向路由表的末尾,那么实际上应该是是路由表的第一项
// if (iter == route_cfg_.end())
// {
// iter = route_cfg_.begin();
// }
//
// return &(*iter);
//}
////===================================================================================================
//
////按照DB取模进行Proxy转发,用于DBServer和金融服务器
//DBModalProxyProcess::DBModalProxyProcess():
// Interface_WH_Proxy()
//{
//}
//
//DBModalProxyProcess::~DBModalProxyProcess()
//{
// std::map<unsigned short, DBModalProxyInfo*>::iterator iter = dbmodal_proxy_map_.begin();
//
// for (; iter != dbmodal_proxy_map_.end(); iter++)
// {
// // 释放分配的内存
// delete iter->second;
// iter->second = NULL;
// }
//}
//
//
//int DBModalProxyProcess::get_proxy_config(const ZCE_Conf_PropertyTree *conf_tree)
//{
//
// int ret = 0;
//
// //得到过滤得命令
// ret = Interface_WH_Proxy::get_proxy_config(conf_tree);
// if (ret != 0)
// {
// return ret;
// }
//
//
//
// for (unsigned int i = 0; i < cfg->dbmodal_info_.route_num_; i++)
// {
// DBModalProxyInfo *dbmodal_proxy_info = new DBModalProxyInfo();
// conf_proxysvr::RouteInfo *route_info = &(cfg->dbmodal_info_.route_info_[i]);
//
// dbmodal_proxy_info->distribute_module_ = route_info->distribute_module_;
// dbmodal_proxy_info->distribute_offset_ = route_info->distribute_offset_;
// dbmodal_proxy_info->router_svr_type_ = route_info->svr_type_;
//
// ZCE_LOG(RS_INFO,"[DBModalProxy] route_svr_type:%u, distribute_offset:%u, distribute_module:%u",
// dbmodal_proxy_info->router_svr_type_,
// dbmodal_proxy_info->distribute_offset_,
// dbmodal_proxy_info->distribute_module_);
//
// dbmodal_proxy_info->normal_router_cfg_.resize(dbmodal_proxy_info->distribute_module_);
// dbmodal_proxy_info->clone_router_cfg_.resize(dbmodal_proxy_info->distribute_module_);
//
// for (unsigned int k = 0; k < dbmodal_proxy_info->distribute_module_; k++)
// {
// dbmodal_proxy_info->normal_router_cfg_[k] = route_info->svr_id_[k].nomal_service_id_;
//
// if (route_info->svr_id_[k].clone_service_id_)
// {
// dbmodal_proxy_info->clone_router_cfg_[k] = route_info->svr_id_[k].clone_service_id_;
// }
// else
// {
// dbmodal_proxy_info->clone_router_cfg_[k] = SERVICES_ID::INVALID_SERVICES_ID;
// }
//
// ZCE_LOG(RS_INFO,"[DBModalProxy] normal service:%u|%u, clone service:%u|%u, passby service:%u|%u",
// dbmodal_proxy_info->router_svr_type_, dbmodal_proxy_info->normal_router_cfg_[k],
// dbmodal_proxy_info->router_svr_type_, dbmodal_proxy_info->clone_router_cfg_[k]);
// }
//
// dbmodal_proxy_map_.insert(std::make_pair<unsigned short, DBModalProxyInfo*>
// (dbmodal_proxy_info->router_svr_type_, dbmodal_proxy_info));
// }
//
// return 0;
//}
//
////要处理的帧
//int DBModalProxyProcess::process_proxy(Zerg_App_Frame *proc_frame)
//{
// ZCE_LOG(RS_DEBUG,"Receive a dbmode frame to process,"
// "send svr:[%u|%u], "
// "recv svr:[%u|%u], "
// "frame_uid:%u, "
// "frame_cmd:%u, "
// "frame_len:%u. ",
// proc_frame->send_service_.services_type_,
// proc_frame->send_service_.services_id_,
// proc_frame->recv_service_.services_type_,
// proc_frame->recv_service_.services_id_,
// proc_frame->frame_uid_,
// proc_frame->frame_command_,
// proc_frame->frame_length_);
//
// int ret = 0;
// // 内部处理的命令,跳过
// bool bsnderr;
//
// if (proc_frame->is_internal_process(bsnderr) == true)
// {
// ZCE_LOG(RS_INFO,"Receive a internal command, frame_uin:%u, frame_command:%u. ",
// proc_frame->frame_uid_, proc_frame->frame_command_);
// return 0;
// }
//
// std::map<unsigned short, DBModalProxyInfo*>::iterator iter =
// dbmodal_proxy_map_.find(proc_frame->recv_service_.services_type_);
//
// if (iter != dbmodal_proxy_map_.end())
// {
// // 要转发的类型已配置, 获取对应路由信息
// DBModalProxyInfo *dbmodal_proxy_info = iter->second;
//
// //------------------------------------------------------------------
// unsigned int uid = proc_frame->frame_uid_;
//
// // 过滤掉uid为0的数据
// if (uid == 0 )
// {
//
// proc_frame->dumpoutput_framehead("[FROM RECV FRAME]", RS_ERROR);
//
// Soar_Stat_Monitor::instance()->increase_once(WORMHOLE_TRANS_PKG_ERROR);
//
// return SOAR_RET::ERROR_APPFRAME_ERROR;
// }
//
// // 关键代码处
// unsigned int mod =
// (uid >> dbmodal_proxy_info->distribute_offset_) % dbmodal_proxy_info->distribute_module_;
//
// // ------------------------------------------------------------------
// proc_frame->recv_service_.services_type_ = dbmodal_proxy_info->router_svr_type_;
// proc_frame->recv_service_.services_id_ = dbmodal_proxy_info->normal_router_cfg_[mod];
//
// // 日志调整为DEBUG级别的
// ZCE_LOG(RS_DEBUG,"Send to main services [%u|%u], frame_uin:%u, "
// "frame_command:%u, frame_len:%u, trans_id[%u]. ",
// proc_frame->recv_service_.services_type_,
// proc_frame->recv_service_.services_id_,
// proc_frame->frame_uid_,
// proc_frame->frame_command_,
// proc_frame->frame_length_,
// proc_frame->transaction_id_);
//
// // 只生成了一个帧
// int ret = zerg_mmap_pipe_->push_back_sendpipe(proc_frame);
//
// //
// if (ret != 0)
// {
// return SOAR_RET::ERR_PROXY_SEND_PIPE_IS_FULL;
// }
//
// // 如果有备份路由,则将数据转发给一个备份的代理
// if (dbmodal_proxy_info->clone_router_cfg_[mod] != SERVICES_ID::INVALID_SERVICES_ID )
// {
//
// proc_frame->recv_service_.services_id_ = dbmodal_proxy_info->clone_router_cfg_[mod];
// ZCE_LOG(RS_INFO,"Send to backup services [%u|%u], frame_uin:%u,"
// " frame_command:%u, frame_len:%u, back trans_id[%u]. ",
// proc_frame->recv_service_.services_type_,
// proc_frame->recv_service_.services_id_,
// proc_frame->frame_uid_,
// proc_frame->frame_command_,
// proc_frame->frame_length_,
// proc_frame->backfill_trans_id_);
//
// ret = zerg_mmap_pipe_->push_back_sendpipe(proc_frame);
//
// if (ret != 0)
// {
// return SOAR_RET::ERR_PROXY_SEND_PIPE_IS_FULL;
// }
// }
//
// }
// // 另外一个路由方向的事情,直接透传
// else
// {
// //
// // 加一条日志,方便跟踪
// ZCE_LOG(RS_INFO,"Send back [%u|%u], frame_uin:%u, frame_command:%u, frame_len:%u. ",
// proc_frame->recv_service_.services_type_,
// proc_frame->recv_service_.services_id_,
// proc_frame->frame_uid_,
// proc_frame->frame_command_,
// proc_frame->frame_length_);
//
// ret = zerg_mmap_pipe_->push_back_sendpipe(proc_frame);
//
// //
// if (ret != 0)
// {
// return SOAR_RET::ERR_PROXY_SEND_PIPE_IS_FULL;
// }
// }
//
// return 0;
//}
| sailzeng/zcelib | src/commsvr/wormholesvrd/wormhole_proxyprocess.cpp | C++ | apache-2.0 | 23,023 |
/**
* @license
* Copyright 2018-2021 Balena Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as semver from 'balena-semver';
import * as Docker from 'dockerode';
import * as _ from 'lodash';
import { Composition } from 'resin-compose-parse';
import {
BuildTask,
getAuthConfigObj,
LocalImage,
RegistrySecrets,
} from 'resin-multibuild';
import type { Readable } from 'stream';
import { BALENA_ENGINE_TMP_PATH } from '../../config';
import { ExpectedError } from '../../errors';
import {
checkBuildSecretsRequirements,
loadProject,
makeBuildTasks,
tarDirectory,
} from '../compose_ts';
import Logger = require('../logger');
import { DeviceAPI, DeviceInfo } from './api';
import * as LocalPushErrors from './errors';
import LivepushManager from './live';
import { displayBuildLog } from './logs';
import { stripIndent } from '../lazy';
// Placeholder app/release identifiers used when building on a local-mode
// device: there is no cloud application or release in this flow.
const LOCAL_APPNAME = 'localapp';
const LOCAL_RELEASEHASH = 'localrelease';
// Define the logger here so the debug output
// can be used everywhere
const globalLogger = Logger.getLogger();
/**
 * Options for a local-mode device deployment (`balena push <device-ip>`).
 * Field notes reflect how deployToDevice() and the helpers in this file
 * consume them.
 */
export interface DeviceDeployOptions {
 // Project source directory that gets built and tarred up.
 source: string;
 // Device address or `.local` hostname (resolved to IPv4 before use).
 deviceHost: string;
 // Engine port on the device; defaults to 2375 when not given.
 devicePort?: number;
 // Alternative Dockerfile path forwarded to project loading — TODO confirm.
 dockerfilePath?: string;
 registrySecrets: RegistrySecrets;
 multiDockerignore: boolean;
 // presumably disables the build cache on the device engine; not read in
 // the code visible here — verify against the build-options assembly.
 nocache: boolean;
 nogitignore: boolean; // v13: delete this line
 noParentCheck: boolean;
 // When true, no livepush session is started after deploying.
 nolive: boolean;
 pull: boolean;
 // When true, device/service logs are not streamed after deploying.
 detached: boolean;
 // Service names used to filter the streamed logs.
 services?: string[];
 // When true, system logs are included in the stream.
 system: boolean;
 // Raw `--env` strings, parsed by environmentFromInput().
 env: string[];
 convertEol: boolean;
}
// Map of service name -> { VAR_NAME: value } environment overrides.
interface ParsedEnvironment {
 [serviceName: string]: { [key: string]: string };
}
/**
 * Parse raw `--env` strings into a per-service environment map.
 *
 * Each entry has the form `[service:]NAME=value`. When the prefix names a
 * known service, the variable applies to that service only; an unknown
 * prefix is treated as part of the variable name (with a debug warning).
 * Entries without a prefix apply to every service.
 *
 * @param envs         raw variable definitions from the command line
 * @param serviceNames all service names in the composition
 * @param logger       logger used for the unknown-service warning
 * @throws ExpectedError when an entry does not match `[service:]NAME=value`
 */
async function environmentFromInput(
 envs: string[],
 serviceNames: string[],
 logger: Logger,
): Promise<ParsedEnvironment> {
 // Capture groups: 1 = optional service prefix, 2 = name, 3 = value.
 const varRegex = /^(?:([^\s:]+):)?([^\s]+?)=(.*)$/;
 const ret: ParsedEnvironment = {};
 // Pre-populate with every service, so membership checks are just `in`.
 for (const service of serviceNames) {
  ret[service] = {};
 }
 for (const env of envs) {
  // Fixed: check the match result directly instead of the original's
  // redundant non-null assertion (`maybeMatch!`) after the null check.
  const match = env.match(varRegex);
  if (match == null) {
   throw new ExpectedError(`Unable to parse environment variable: ${env}`);
  }
  let service: string | undefined;
  if (match[1]) {
   // A prefix is only honoured when it names a real service.
   if (!(match[1] in ret)) {
    logger.logDebug(
     `Warning: Cannot find a service with name ${match[1]}. Treating the string as part of the environment variable name.`,
    );
    match[2] = `${match[1]}:${match[2]}`;
   } else {
    service = match[1];
   }
  }
  if (service != null) {
   ret[service][match[2]] = match[3];
  } else {
   // No (valid) prefix: apply the variable to all services.
   for (const serviceName of serviceNames) {
    ret[serviceName][match[2]] = match[3];
   }
  }
 }
 return ret;
}
/**
 * Deploy a project to a local-mode device: check supervisor reachability
 * and version, build the composition on the device's engine, set the
 * supervisor's target state, then stream logs and (unless `nolive`) run a
 * livepush session that watches for file changes.
 *
 * Throws ExpectedError when the device cannot be reached or its supervisor
 * is too old for multicontainer local mode.
 */
export async function deployToDevice(opts: DeviceDeployOptions): Promise<void> {
 // Resolve .local addresses to IP to avoid
 // issue with Windows and rapid repeat lookups.
 // see: https://github.com/balena-io/balena-cli/issues/1518
 if (opts.deviceHost.includes('.local')) {
  const util = await import('util');
  const dns = await import('dns');
  const { address } = await util.promisify(dns.lookup)(opts.deviceHost, {
   family: 4,
  });
  opts.deviceHost = address;
 }
 // Supervisor local-mode API port on the device.
 const port = 48484;
 const api = new DeviceAPI(globalLogger, opts.deviceHost, port);
 // First check that we can access the device with a ping
 try {
  globalLogger.logDebug('Checking we can access device');
  await api.ping();
 } catch (e) {
  throw new ExpectedError(stripIndent`
   Could not communicate with device supervisor at address ${opts.deviceHost}:${port}.
   Device may not have local mode enabled. Check with:
     balena device local-mode <device-uuid>
  `);
 }
 const versionError = new Error(
  'The supervisor version on this remote device does not support multicontainer local mode. ' +
   'Please update your device to balenaOS v2.20.0 or greater from the dashboard.',
 );
 try {
  const version = await api.getVersion();
  globalLogger.logDebug(`Checking device supervisor version: ${version}`);
  if (!semver.satisfies(version, '>=7.21.4')) {
   throw new ExpectedError(versionError);
  }
  // Livepush needs a newer supervisor; downgrade gracefully when absent.
  if (!opts.nolive && !semver.satisfies(version, '>=9.7.0')) {
   globalLogger.logWarn(
    `Using livepush requires a balena supervisor version >= 9.7.0. A live session will not be started.`,
   );
   opts.nolive = true;
  }
 } catch (e) {
  // Very old supervisor versions do not support /version endpoint
  // a DeviceAPIError is expected in this case
  if (e instanceof LocalPushErrors.DeviceAPIError) {
   throw new ExpectedError(versionError);
  } else {
   throw e;
  }
 }
 globalLogger.logInfo(`Starting build on device ${opts.deviceHost}`);
 const project = await loadProject(globalLogger, {
  convertEol: opts.convertEol,
  dockerfilePath: opts.dockerfilePath,
  multiDockerignore: opts.multiDockerignore,
  nogitignore: opts.nogitignore, // v13: delete this line
  noParentCheck: opts.noParentCheck,
  projectName: 'local',
  projectPath: opts.source,
  isLocal: true,
 });
 // Attempt to attach to the device's docker daemon
 const docker = connectToDocker(
  opts.deviceHost,
  opts.devicePort != null ? opts.devicePort : 2375,
 );
 await checkBuildSecretsRequirements(docker, opts.source);
 globalLogger.logDebug('Tarring all non-ignored files...');
 const tarStream = await tarDirectory(opts.source, {
  composition: project.composition,
  convertEol: opts.convertEol,
  multiDockerignore: opts.multiDockerignore,
  nogitignore: opts.nogitignore, // v13: delete this line
 });
 // Try to detect the device information
 const deviceInfo = await api.getDeviceInformation();
 // Build logs are only collected when livepush will need them.
 let buildLogs: Dictionary<string> | undefined;
 if (!opts.nolive) {
  buildLogs = {};
 }
 const { awaitInterruptibleTask } = await import('../helpers');
 const buildTasks = await awaitInterruptibleTask<typeof performBuilds>(
  performBuilds,
  project.composition,
  tarStream,
  docker,
  deviceInfo,
  globalLogger,
  opts,
  buildLogs,
 );
 globalLogger.outputDeferredMessages();
 // Print a newline to clearly separate build time and runtime
 console.log();
 const envs = await environmentFromInput(
  opts.env,
  Object.getOwnPropertyNames(project.composition.services),
  globalLogger,
 );
 globalLogger.logDebug('Setting device state...');
 // Now set the target state on the device
 const currentTargetState = await api.getTargetState();
 const targetState = generateTargetState(
  currentTargetState,
  project.composition,
  buildTasks,
  envs,
 );
 globalLogger.logDebug(`Sending target state: ${JSON.stringify(targetState)}`);
 await api.setTargetState(targetState);
 // Now that we've set the target state, the device will do it's thing
 // so we can either just display the logs, or start a livepush session
 // (whilst also display logs)
 const promises: Array<Promise<void>> = [streamDeviceLogs(api, opts)];
 let livepush: LivepushManager | null = null;
 if (!opts.nolive) {
  livepush = new LivepushManager({
   api,
   buildContext: opts.source,
   buildTasks,
   docker,
   logger: globalLogger,
   composition: project.composition,
   buildLogs: buildLogs!,
   deployOpts: opts,
  });
  promises.push(livepush.init());
  if (opts.detached) {
   globalLogger.logLivepush(
    'Running in detached mode, no service logs will be shown',
   );
  }
  globalLogger.logLivepush('Watching for file changes...');
 }
 try {
  await awaitInterruptibleTask(() => Promise.all(promises));
 } finally {
  // Stop watching files after log streaming ends (e.g. on SIGINT)
  livepush?.close();
  await livepush?.cleanup();
 }
}
/**
 * Stream logs from the device to the console until the user interrupts.
 *
 * Resolves immediately, without streaming anything, when the deploy is
 * running in detached mode (no service output is wanted in that case).
 *
 * @param deviceApi API client for the target device
 * @param opts      Deploy options (detached / system / service filters)
 */
async function streamDeviceLogs(
    deviceApi: DeviceAPI,
    opts: DeviceDeployOptions,
) {
    if (opts.detached) {
        // Detached mode: the caller explicitly asked for no log output.
        return;
    }
    globalLogger.logInfo('Streaming device logs...');
    const logsModule = await import('./logs');
    return logsModule.connectAndDisplayDeviceLogs({
        deviceApi,
        logger: globalLogger,
        system: opts.system || false,
        filterServices: opts.services,
        maxAttempts: 1001,
    });
}
/**
 * Create a dockerode client for the remote balenaEngine/Docker daemon.
 *
 * @param host Hostname or IP address of the daemon
 * @param port TCP port the daemon listens on
 */
function connectToDocker(host: string, port: number): Docker {
    const dockerOptions = {
        host,
        port,
        // dockerode uses this Promise implementation internally.
        Promise: require('bluebird'),
    };
    return new Docker(dockerOptions);
}
/**
 * Build every service in the composition against the device's docker
 * daemon and return the resulting build tasks.
 *
 * Side effects:
 *  - mutates `buildTasks` (docker options, output handlers);
 *  - when `buildLogs` is provided, fills it with one log string per
 *    non-external service;
 *  - tags external (pulled) images with the local image name and removes
 *    the original tags from the daemon.
 *
 * @param composition Parsed docker-compose composition
 * @param tarStream   Tarred project source to build from
 * @param docker      Client connected to the device's daemon
 * @param deviceInfo  Architecture/device-type info for base image selection
 * @param logger      Logger for build output
 * @param opts        Deploy options (nolive, nocache, registry secrets, ...)
 * @param buildLogs   Optional map to capture per-service build logs
 *                    (used later by livepush)
 * @returns The build tasks, after the builds have completed successfully
 * @throws LocalPushErrors.BuildError when any service build fails
 */
async function performBuilds(
    composition: Composition,
    tarStream: Readable,
    docker: Docker,
    deviceInfo: DeviceInfo,
    logger: Logger,
    opts: DeviceDeployOptions,
    buildLogs?: Dictionary<string>,
): Promise<BuildTask[]> {
    const multibuild = await import('resin-multibuild');

    const buildTasks = await makeBuildTasks(
        composition,
        tarStream,
        deviceInfo,
        logger,
        LOCAL_APPNAME,
        LOCAL_RELEASEHASH,
        (content) => {
            // In livepush mode the Dockerfile is preprocessed so that live
            // updates can be applied later; otherwise it is used verbatim.
            if (!opts.nolive) {
                return LivepushManager.preprocessDockerfile(content);
            } else {
                return content;
            }
        },
    );

    logger.logDebug('Probing remote daemon for cache images');
    await assignDockerBuildOpts(docker, buildTasks, opts);

    // If we're passed a build logs object make sure to set it
    // up properly: one (initially empty) entry per locally-built service.
    let logHandlers: ((serviceName: string, line: string) => void) | undefined;
    if (buildLogs != null) {
        for (const task of buildTasks) {
            if (!task.external) {
                buildLogs[task.serviceName] = '';
            }
        }
        logHandlers = (serviceName: string, line: string) => {
            buildLogs[serviceName] += `${line}\n`;
        };
    }

    logger.logDebug('Starting builds...');
    assignOutputHandlers(buildTasks, logger, logHandlers);
    const localImages = await multibuild.performBuilds(
        buildTasks,
        docker,
        BALENA_ENGINE_TMP_PATH,
    );

    // Check for failures (throws BuildError when any build failed).
    await inspectBuildResults(localImages);

    const imagesToRemove: string[] = [];
    // Now tag any external images with the correct name that they should be,
    // as this won't be done by resin-multibuild
    await Promise.all(
        localImages.map(async (localImage) => {
            if (localImage.external) {
                // We can be sure that localImage.name is set here, because of the failure code above
                const image = docker.getImage(localImage.name!);
                await image.tag({
                    repo: generateImageName(localImage.serviceName),
                    force: true,
                });
                // Remember the original tag so it can be cleaned up below.
                imagesToRemove.push(localImage.name!);
            }
        }),
    );
    // Drop the original (pre-retag) image references from the daemon.
    await Promise.all(
        _.uniq(imagesToRemove).map((image) =>
            docker.getImage(image).remove({ force: true }),
        ),
    );

    return buildTasks;
}
// Rebuild a single container, execute it on device, and
// return the build logs
/**
 * Rebuild one service of the composition (used by livepush when a
 * Dockerfile changes) and return the captured build log text.
 *
 * @param serviceName   Name of the service to rebuild
 * @param docker        Client connected to the device's daemon
 * @param logger        Logger for build output
 * @param deviceInfo    Architecture/device-type info
 * @param composition   Parsed composition containing the service
 * @param source        Path to the project source directory
 * @param opts          Deploy options
 * @param containerIdCb Optional callback, invoked with the intermediate
 *                      container id parsed from the build output — this is
 *                      what a caller needs in order to cancel the build
 * @returns Accumulated build log for the service
 * @throws ExpectedError when the composition has no such service
 * @throws LocalPushErrors.BuildError when the build fails
 */
export async function rebuildSingleTask(
    serviceName: string,
    docker: Docker,
    logger: Logger,
    deviceInfo: DeviceInfo,
    composition: Composition,
    source: string,
    opts: DeviceDeployOptions,
    // To cancel a running build, you must first find the
    // container id that it's running in. This is printed in
    // the logs, so any caller who wants to keep track of
    // this should provide the following callback
    containerIdCb?: (id: string) => void,
): Promise<string> {
    const multibuild = await import('resin-multibuild');
    // First we run the build task, to get the new image id
    let buildLogs = '';
    const logHandler = (_s: string, line: string) => {
        buildLogs += `${line}\n`;
        if (containerIdCb != null) {
            // Docker prints " ---> Running in <container id>" for each step;
            // capture that id so the caller can cancel the build if needed.
            const match = line.match(/^\s*--->\s*Running\s*in\s*([a-f0-9]*)\s*$/i);
            if (match != null) {
                containerIdCb(match[1]);
            }
        }
    };

    const tarStream = await tarDirectory(source, {
        composition,
        convertEol: opts.convertEol,
        multiDockerignore: opts.multiDockerignore,
        nogitignore: opts.nogitignore, // v13: delete this line
    });

    // Build tasks are generated for the whole composition; pick out the one
    // matching the requested service.
    const task = _.find(
        await makeBuildTasks(
            composition,
            tarStream,
            deviceInfo,
            logger,
            LOCAL_APPNAME,
            LOCAL_RELEASEHASH,
            (content) => {
                if (!opts.nolive) {
                    return LivepushManager.preprocessDockerfile(content);
                } else {
                    return content;
                }
            },
        ),
        { serviceName },
    );

    if (task == null) {
        throw new ExpectedError(
            `Could not find build task for service ${serviceName}`,
        );
    }

    await assignDockerBuildOpts(docker, [task], opts);
    await assignOutputHandlers([task], logger, logHandler);

    const [localImage] = await multibuild.performBuilds(
        [task],
        docker,
        BALENA_ENGINE_TMP_PATH,
    );

    if (!localImage.successful) {
        throw new LocalPushErrors.BuildError([
            {
                error: localImage.error!,
                serviceName,
            },
        ]);
    }

    return buildLogs;
}
/**
 * Attach output handlers to each build task so that build progress is
 * rendered through the given logger (and optionally captured).
 *
 * External (pulled) images report progress objects via `progressHook`;
 * locally-built images emit a raw output stream handled via `streamHook`.
 *
 * @param buildTasks Tasks to decorate (mutated in place)
 * @param logger     Logger used to display build lines
 * @param logCb      Optional callback invoked with every non-empty line
 */
function assignOutputHandlers(
    buildTasks: BuildTask[],
    logger: Logger,
    logCb?: (serviceName: string, line: string) => void,
) {
    for (const task of buildTasks) {
        if (task.external) {
            task.progressHook = (progressObj) => {
                const message = progressObj.progress;
                displayBuildLog({ serviceName: task.serviceName, message }, logger);
            };
            continue;
        }
        task.streamHook = (stream) => {
            stream.on('data', (buf: Buffer) => {
                const line = _.trimEnd(buf.toString());
                if (line === '') {
                    return;
                }
                displayBuildLog(
                    { serviceName: task.serviceName, message: line },
                    logger,
                );
                if (logCb) {
                    logCb(task.serviceName, line);
                }
            });
        };
    }
}
/**
 * List the IDs of every image (including intermediate layers) present on
 * the device's docker daemon.
 */
async function getDeviceDockerImages(docker: Docker): Promise<string[]> {
    const images = await docker.listImages({ all: true });
    return images.map((image) => image.Id);
}
// Mutates buildTasks
/**
 * Assign the docker build options for every task, using all images already
 * present on the device as a build cache.
 *
 * For external (pulled) images the per-registry auth config for that image
 * is resolved; for local builds the full registry config is attached.
 *
 * @param docker     Client connected to the device's daemon
 * @param buildTasks Tasks to configure (mutated in place)
 * @param opts       Deploy options (nocache, pull, registry secrets)
 */
async function assignDockerBuildOpts(
    docker: Docker,
    buildTasks: BuildTask[],
    opts: DeviceDeployOptions,
): Promise<void> {
    // Get all of the images on the remote docker daemon, so
    // that we can use all of them for cache
    const images = await getDeviceDockerImages(docker);

    globalLogger.logDebug(`Using ${images.length} on-device images for cache...`);

    await Promise.all(
        buildTasks.map(async (task: BuildTask) => {
            task.dockerOpts = {
                cachefrom: images,
                // Labels let the supervisor/CLI identify locally-pushed images.
                labels: {
                    'io.resin.local.image': '1',
                    'io.resin.local.service': task.serviceName,
                },
                t: generateImageName(task.serviceName),
                nocache: opts.nocache,
                forcerm: true,
                pull: opts.pull,
            };
            if (task.external) {
                // Pulled image: needs the auth entry for its own registry only.
                task.dockerOpts.authconfig = await getAuthConfigObj(
                    task.imageName!,
                    opts.registrySecrets,
                );
            } else {
                task.dockerOpts.registryconfig = opts.registrySecrets;
            }
        }),
    );
}
/** Local images are tagged `local_image_<service>:latest` on the device. */
function generateImageName(serviceName: string): string {
    return 'local_image_' + serviceName + ':latest';
}
/**
 * Build the supervisor target state for a local push.
 *
 * Takes the device's current target state and replaces its single app with
 * one assembled from the composition: each service gets sequential
 * imageId/serviceId numbers, the locally-built image name, merged
 * environment variables and (when present) its build task's contract.
 *
 * @param currentTargetState Target state currently on the device
 * @param composition        Parsed composition
 * @param buildTasks         Build tasks (may be a subset during livepush)
 * @param env                Per-service environment overrides
 * @returns A new target state object (inputs are not mutated)
 */
export function generateTargetState(
    currentTargetState: any,
    composition: Composition,
    buildTasks: BuildTask[],
    env: ParsedEnvironment,
): any {
    const tasksByService = _.keyBy(buildTasks, 'serviceName');
    const services: { [serviceId: string]: any } = {};
    let serviceIdx = 1;
    for (const [name, composeOpts] of Object.entries(composition.services)) {
        // Get rid of any build specific stuff
        const opts: any = _.cloneDeep(composeOpts);
        delete opts.build;
        delete opts.image;

        const defaults = {
            environment: {},
            labels: {},
        };
        opts.environment = _.merge(opts.environment, env[name]);

        // This function can be called with a subset of the
        // build tasks, when a single dockerfile has changed
        // when livepushing, so check the build task exists for
        // this composition entry (everything else in this
        // function comes from the composition which doesn't
        // change)
        let contract;
        if (name in tasksByService) {
            contract = tasksByService[name].contract;
        }

        services[serviceIdx] = {
            ...defaults,
            ...opts,
            ...(contract != null ? { contract } : {}),
            ...{
                imageId: serviceIdx,
                serviceName: name,
                serviceId: serviceIdx,
                image: generateImageName(name),
                running: true,
            },
        };
        serviceIdx += 1;
    }

    const targetState = _.cloneDeep(currentTargetState);
    // Delete before re-assigning so the `apps` key is serialized last,
    // matching the previous output order.
    delete targetState.local.apps;
    targetState.local.apps = {
        1: {
            name: LOCAL_APPNAME,
            commit: LOCAL_RELEASEHASH,
            releaseId: '1',
            services,
            volumes: composition.volumes || {},
            networks: composition.networks || {},
        },
    };
    return targetState;
}
/**
 * Inspect the results of a multibuild run and throw when any service
 * failed to build.
 *
 * @param images Per-service build results
 * @throws LocalPushErrors.BuildError aggregating every failed service
 */
async function inspectBuildResults(images: LocalImage[]): Promise<void> {
    const failures: LocalPushErrors.BuildFailure[] = images
        .filter((image) => !image.successful)
        .map((image) => ({
            error: image.error!,
            serviceName: image.serviceName,
        }));

    if (failures.length > 0) {
        // Bug fix: this used to `throw new BuildError(failures).toString()`,
        // i.e. it threw a plain string, losing the error type and stack
        // trace. Throw the error object itself.
        throw new LocalPushErrors.BuildError(failures);
    }
}
| resin-io/resin-cli | lib/utils/device/deploy.ts | TypeScript | apache-2.0 | 16,586 |
##############################################################################
# Copyright 2017-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pytest
import os
import numpy as np
from unittest.mock import patch, MagicMock, Mock
import json
from pyquil.api import Job, QVMConnection
from grove.tomography.tomography import (MAX_QUBITS_PROCESS_TOMO,
default_channel_ops)
from grove.tomography.process_tomography import (DEFAULT_PROCESS_TOMO_SETTINGS,
process_tomography_programs,
do_process_tomography, ProcessTomography,
COMPLETELY_POSITIVE)
from grove.tomography.process_tomography import (TRACE_PRESERVING)
from grove.tomography.utils import (make_histogram,
sample_bad_readout, basis_state_preps,
estimate_assignment_probs, BAD_2Q_READOUT, SEED,
EPS, CNOT_PROGRAM, import_qutip, import_cvxpy)
from grove.tomography.operator_utils import make_diagonal_povm, POVM_PI_BASIS
# Optional heavy dependencies: qutip and cvxpy are both required by the
# tomography code under test, so the whole module is skipped when either
# is missing.
qt = import_qutip()
cvxpy = import_cvxpy()
if not qt:
    pytest.skip("Qutip not installed, skipping tests", allow_module_level=True)
if not cvxpy:
    pytest.skip("CVXPY not installed, skipping tests", allow_module_level=True)

# Pre-recorded fixture data: QVM measurement shots and job results captured
# from an earlier run, replayed here so these tests need no live connection.
SHOTS_PATH = os.path.join(os.path.dirname(__file__), 'process_shots.json')
RESULTS_PATH = os.path.join(os.path.dirname(__file__), 'process_results.json')

# Replace sample_bad_readout with a mock that replays one recorded shot
# array per call, in order.
sample_bad_readout = MagicMock(sample_bad_readout)
sample_bad_readout.side_effect = [np.array(shots) for shots in json.load(open(SHOTS_PATH, 'r'))]
# these mocks are set up such that a single mock Job is returned by the QVMConnection's wait_for_job
# but calling job.result() returns a different value every time via the side_effect defined below
cxn = MagicMock(QVMConnection)
job = MagicMock(Job)
job.result.side_effect = json.load(open(RESULTS_PATH, 'r'))
cxn.wait_for_job.return_value = job
def test_process_tomography():
    """End-to-end check of ProcessTomography.estimate_from_ssr on a CNOT.

    Reconstructs the CNOT process from replayed (mocked) measurement data
    under three constraint settings, and verifies the estimate against the
    ideal gate in several representations (chi, Choi, superoperator, Kraus).
    """
    num_qubits = len(CNOT_PROGRAM.get_qubits())
    dimension = 2 ** num_qubits

    tomo_seq = list(process_tomography_programs(CNOT_PROGRAM))
    nsamples = 3000

    np.random.seed(SEED)
    # We need more samples on the readout to ensure convergence.
    state_prep_hists = [make_histogram(sample_bad_readout(p, 2 * nsamples, BAD_2Q_READOUT, cxn),
                                       dimension) for p in basis_state_preps(*range(num_qubits))]
    assignment_probs = estimate_assignment_probs(state_prep_hists)

    # One histogram of measured outcomes per tomography program.
    histograms = np.zeros((len(tomo_seq), dimension))

    for jj, p in enumerate(tomo_seq):
        histograms[jj] = make_histogram(sample_bad_readout(p, nsamples, BAD_2Q_READOUT, cxn),
                                        dimension)

    # Reshape to (pre-channel op, post-channel op, outcome) as expected by
    # estimate_from_ssr.
    channel_ops = list(default_channel_ops(num_qubits))

    histograms = histograms.reshape((len(channel_ops), len(channel_ops), dimension))

    povm = make_diagonal_povm(POVM_PI_BASIS ** num_qubits, assignment_probs)
    cnot_ideal = qt.cnot()
    # Re-run the estimate under increasingly strict physicality constraints.
    for settings in [
        DEFAULT_PROCESS_TOMO_SETTINGS,
        DEFAULT_PROCESS_TOMO_SETTINGS._replace(constraints={TRACE_PRESERVING}),
        DEFAULT_PROCESS_TOMO_SETTINGS._replace(constraints={TRACE_PRESERVING, COMPLETELY_POSITIVE}),
    ]:
        process_tomo = ProcessTomography.estimate_from_ssr(histograms, povm, channel_ops,
                                                           channel_ops,
                                                           settings)

        assert abs(1 - process_tomo.avg_gate_fidelity(cnot_ideal)) < EPS

        transfer_matrix = process_tomo.pauli_basis.transfer_matrix(qt.to_super(cnot_ideal))
        assert abs(1 - process_tomo.avg_gate_fidelity(transfer_matrix)) < EPS
        chi_rep = process_tomo.to_chi().data.toarray()
        # When comparing to the identity, the chi representation is quadratically larger than the
        # Hilbert space representation, so we take a square root.
        probabilty_scale = np.sqrt(chi_rep.shape[0])
        super_op_from_chi = np.zeros(process_tomo.pauli_basis.ops[0].shape, dtype=np.complex128)
        # Rebuild the superoperator from the chi matrix and check it matches
        # the identity (in the Pauli basis, up to the probability scale).
        for i, si in enumerate(process_tomo.pauli_basis.ops):
            for j, sj in enumerate(process_tomo.pauli_basis.ops):
                contribution = chi_rep[i][j] * si.data.toarray().conj().T.dot(sj.data.toarray())
                super_op_from_chi += contribution / probabilty_scale
        assert np.isclose(np.eye(process_tomo.pauli_basis.ops[0].shape[0]), super_op_from_chi,
                          atol=EPS).all()

        choi_rep = process_tomo.to_choi()

        # Choi matrix should be a valid density matrix, scaled by the dimension of the system.
        assert np.isclose(np.trace(choi_rep.data.toarray()) / probabilty_scale, 1, atol=EPS)

        super_op = process_tomo.to_super()
        # The map should be trace preserving.
        assert np.isclose(np.sum(super_op[0]), 1, atol=EPS)

        kraus_ops = process_tomo.to_kraus()
        # Kraus completeness: sum_k K_k^dagger K_k should equal the identity.
        assert np.isclose(sum(np.trace(k.conjugate().T.dot(k)) for k in kraus_ops),
                          kraus_ops[0].shape[0], atol=.1)

        assert abs(1 - process_tomo.avg_gate_fidelity(qt.to_super(cnot_ideal))) < EPS
        # Smoke-test plotting with matplotlib patched out.
        with patch("grove.tomography.utils.plot_pauli_transfer_matrix"), \
                patch("grove.tomography.process_tomography.plt") as mplt:
            mplt.subplots.return_value = Mock(), Mock()
            process_tomo.plot()
def test_do_process_tomography():
    """Check do_process_tomography: qubit-count validation, gate fidelity
    of the reconstructed CNOT, histogram totals and assignment probs."""
    nsamples = 3000
    qubits = list(range(MAX_QUBITS_PROCESS_TOMO + 1))
    # Test with too many qubits.
    with pytest.raises(ValueError):
        _ = do_process_tomography(CNOT_PROGRAM, nsamples,
                                  cxn, qubits)
    process_tomo, assignment_probs, histograms = do_process_tomography(CNOT_PROGRAM, nsamples, cxn)
    cnot_ideal = qt.cnot()
    assert abs(1 - process_tomo.avg_gate_fidelity(cnot_ideal)) < EPS
    # Every histogram must account for exactly nsamples shots.
    for histogram_collection in histograms:
        for histogram in histogram_collection:
            assert np.sum(histogram) == nsamples
    num_qubits = len(CNOT_PROGRAM.get_qubits())
    # With the mocked (near-ideal) readout the assignment matrix is ~identity.
    assert np.isclose(assignment_probs, np.eye(2 ** num_qubits), atol=EPS).all()
| rigetticomputing/grove | grove/tests/tomography/test_process_tomography.py | Python | apache-2.0 | 6,859 |
/*
* $Id$
*/
package lia.util.net.copy;
import java.nio.ByteBuffer;
import java.util.UUID;
/**
* Wrapper class for a simple block ( can be an offset in whatever stream ... not only a file )
*
* @author ramiro
*/
public class FileBlock {
//used for signaling between Producers/Consumers
//public static final FileBlock EOF_FB = new FileBlock(UUID.randomUUID(), UUID.randomUUID(), -1, ByteBuffer.allocate(0));
public final UUID fdtSessionID;
public final UUID fileSessionID;
public final long fileOffset;
public final ByteBuffer buff;
private FileBlock(final UUID fdtSessionID, final UUID fileSessionID, final long fileOffset, final ByteBuffer buff) {
if (fdtSessionID == null) {
throw new NullPointerException(" [ FDT Bug ? ] fdtSessionID cannot be null; fileSessionID: " + fileSessionID);
}
if (fileSessionID == null) {
throw new NullPointerException(" [ FDT Bug ? ] fileSessionID cannot be null; fdtSessionID: " + fdtSessionID);
}
if (buff == null) {
throw new NullPointerException(" [ FDT Bug ? ] buff cannot be null; fdtSessionID: " + fdtSessionID + " fileSessionID: " + fileSessionID);
}
this.fdtSessionID = fdtSessionID;
this.fileSessionID = fileSessionID;
this.fileOffset = fileOffset;
this.buff = buff;
}
//TODO - Make a simple cache of FileBlock-s objects ... I don't think FDT will gain anything from this "feature"
// so I will not implement it, yet
public static FileBlock getInstance(UUID fdtSessionID, UUID fileSessionID, long fileOffset, ByteBuffer buff) {
return new FileBlock(fdtSessionID, fileSessionID, fileOffset, buff);
}
public String toString() {
return "FileBlock for [ " + fileSessionID + " ] offset: " + fileOffset + " payload: " + buff;
}
}
| MonALISA-CIT/fdt | src/lia/util/net/copy/FileBlock.java | Java | apache-2.0 | 1,871 |
<?php
/*********
* Author: Iman Biswas
* Date : 14 sep 2011
* Modified By:
* Modified Date:
*
* Purpose:
* Model For User Type Master
*
* @package User
* @subpackage Access Control
*
* @includes infModel.php
* @includes MY_Model.php
*
* @link MY_Model.php
*/
class User_type_model extends MY_Model implements InfModel
{
private $conf;
private $tbl;///used for this class
/**
 * Sets up the model: resolves the USER_TYPE table name from the DB
 * object and caches a reference to the CodeIgniter config array.
 */
public function __construct()
{
    try
    {
        parent::__construct();
        // Table name constant is declared on the DB object.
        $this->tbl=$this->db->USER_TYPE;
        // Reference (not copy) to the global config array.
        $this->conf=&get_config();
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/******
 * This method will fetch all user-type records from the db.
 *
 * @param string $s_where complete WHERE clause including the keyword, ex- " Where i_status=1 ".
 *                        NOTE(review): appended verbatim to the SQL — callers must never
 *                        pass unsanitized user input here.
 * @param int $i_start, starting offset for pagination
 * @param int $i_limit, number of records to fetch, used for pagination
 * @returns array list of rows indexed 0..n-1; empty array when none found
 */
public function fetch_multi($s_where=null,$i_start=null,$i_limit=null)
{
    try
    {
        $ret_=array();
        // LIMIT is only appended when both pagination params are numeric.
        $s_qry="SELECT * FROM ".$this->tbl." ut "
            .($s_where!=""?$s_where:"" ).(is_numeric($i_start) && is_numeric($i_limit)?" Limit ".intval($i_start).",".intval($i_limit):"" );
        $rs=$this->db->query($s_qry);
        $i_cnt=0;
        if($rs->num_rows()>0)
        {
            foreach($rs->result() as $row)
            {
                $ret_[$i_cnt]["id"]=$row->id;////always integer
                $ret_[$i_cnt]["s_user_type"]=stripslashes($row->s_user_type);
                $ret_[$i_cnt]["i_created_by"]=intval($row->i_created_by);
                // dt_created_on is stored as a unix timestamp; format per site config.
                $ret_[$i_cnt]["dt_created_on"]=date($this->conf["site_date_format"],intval($row->dt_created_on));
                $ret_[$i_cnt]["i_status"]=intval($row->i_status);
                $ret_[$i_cnt]["s_status"]=(intval($row->i_status)==1?"Active":"Inactive");
                $i_cnt++;
            }
            $rs->free_result();
        }
        unset($s_qry,$rs,$row,$i_cnt,$s_where,$i_start,$i_limit);
        return $ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/****
 * Fetch total number of user-type records.
 *
 * @param string $s_where complete WHERE clause including the keyword, ex- " Where i_status=1 ".
 *                        NOTE(review): appended verbatim to the SQL — do not pass
 *                        unsanitized user input.
 * @returns int row count (0 when none)
 */
public function gettotal_info($s_where=null)
{
    try
    {
        $ret_=0;
        $s_qry="Select count(*) as i_total "
            ."From ".$this->tbl." ut "
            .($s_where!=""?$s_where:"" );
        $rs=$this->db->query($s_qry);
        $i_cnt=0;
        if($rs->num_rows()>0)
        {
            // COUNT(*) always yields exactly one row.
            foreach($rs->result() as $row)
            {
                $ret_=intval($row->i_total);
            }
            $rs->free_result();
        }
        unset($s_qry,$rs,$row,$i_cnt,$s_where,$i_start,$i_limit);
        return $ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/*******
 * Fetches one record from db for the given id value, including the
 * per-controller access rights for that user type.
 *
 * @param int $i_id primary key of the user type
 * @returns array associative row data; empty array when not found
 */
public function fetch_this($i_id)
{
    try
    {
        $ret_=array();
        ////Using Prepared Statement///
        $s_qry="Select * "
            ."From ".$this->tbl." ut "
            ." Where ut.id=?";
        $rs=$this->db->query($s_qry,array(intval($i_id)));
        if($rs->num_rows()>0)
        {
            foreach($rs->result() as $row)
            {
                $ret_["id"]=$row->id;////always integer
                $ret_["s_user_type"]=stripslashes($row->s_user_type);
                $ret_["dt_created_on"]=date($this->conf["site_date_format"],intval($row->dt_created_on));
                // Bug fix: this key used to be "i_status " (trailing space),
                // so callers reading $ret_["i_status"] always got NULL.
                // Keyed consistently with fetch_multi() now.
                $ret_["i_status"]=intval($row->i_status);
                $ret_["s_status"]=(intval($row->i_status)==1?"Active":"Inactive");
                // Controller-level access rights for this user type.
                $ret_["access_controll_array"] = $this->fetch_controller_access($ret_["id"]);
            }
            $rs->free_result();
        }
        unset($s_qry,$rs,$row,$i_id);
        return $ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/***
 * Inserts a new user-type record into db. As we know the table name
 * we will not pass it into params.
 *
 * NOTE(review): the value is passed through addslashes(htmlspecialchars())
 * even though the query uses placeholders; the data is therefore stored
 * escaped and read back with stripslashes() in fetch_multi()/fetch_this().
 * Changing this would require migrating existing rows — confirm before
 * touching it.
 *
 * @param array $info, array of fields(as key) with values,
 *              expects keys "s_user_type" and "dt_created_on" (unix timestamp)
 * @returns int new row id on success and 0/FALSE if failed
 */
public function add_info($info)
{
    try
    {
        $i_ret_=0;////Returns false
        if(!empty($info))
        {
            $s_qry="Insert Into ".$this->tbl." Set ";
            $s_qry.=" s_user_type=? ";
            $s_qry.=", dt_created_on=? ";
            //$s_qry.=", i_is_deleted=?"; ///have default value
            $this->db->query($s_qry,array(addslashes(htmlspecialchars(trim($info["s_user_type"]))),
                intval($info["dt_created_on"])
            ));
            $i_ret_=$this->db->insert_id();
            if($i_ret_)
            {
                // Audit-trail entry with the executed statement and its binds.
                $logi["msg"]="Inserting into ".$this->tbl." ";
                $logi["sql"]= serialize(array($s_qry,array(addslashes(htmlspecialchars(trim($info["s_user_type"]))),
                    intval($info["dt_created_on"])
                )) ) ;
                $this->log_info($logi);
                unset($logi,$logindata);
            }
        }
        unset($s_qry);
        return $i_ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/***
 * Update a user-type record in db. As we know the table name
 * we will not pass it into params.
 *
 * @param array $info, array of fields(as key) with values; expects key "s_user_type"
 * @param int $i_id, id value to be updated, used in where clause
 * @returns int rows affected on success and 0/FALSE if failed
 */
public function edit_info($info,$i_id)
{
    try
    {
        $i_ret_=0;////Returns false
        if(!empty($info))
        {
            $s_qry="Update ".$this->tbl." Set ";
            $s_qry.=" s_user_type=? ";
            $s_qry.=" Where id=? ";
            // Bind exactly the two placeholders used by the query.
            $a_params=array( addslashes(htmlspecialchars(trim($info["s_user_type"]))),
                intval($i_id)
            );
            $this->db->query($s_qry,$a_params);
            $i_ret_=$this->db->affected_rows();
            if($i_ret_)
            {
                $logi["msg"]="Updating ".$this->tbl." ";
                // Bug fix: the audit log used to serialize parameters
                // (i_created_by, dt_created_on) that were never bound to
                // the executed query; log the real bind values instead.
                // Also removed unset() of $logindata, which never existed here.
                $logi["sql"]= serialize(array($s_qry,$a_params));
                $this->log_info($logi);
                unset($logi,$a_params);
            }
        }
        unset($s_qry);
        return $i_ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/******
 * Soft-deletes a single record, or all records, by setting the
 * i_is_deleted flag (master entries are never physically removed).
 *
 * @param int $i_id, id value to be deleted, used in where clause;
 *            pass -1 to flag ALL rows as deleted
 * @returns int rows affected on success and 0/FALSE if failed
 */
public function delete_info($i_id)
{
    try
    {
        $i_ret_=0;////Returns false
        if(intval($i_id)>0)
        {
            // Soft delete of one row.
            $s_qry="Update ".$this->tbl." Set i_is_deleted=1 ";
            $s_qry.=" Where id=? ";
            $this->db->query($s_qry, array(intval($i_id)) );
            $i_ret_=$this->db->affected_rows();
            if($i_ret_)
            {
                $logi["msg"]="Deleting ".$this->tbl." ";
                $logi["sql"]= serialize(array($s_qry, array(intval($i_id))) ) ;
                $this->log_info($logi);
                unset($logi,$logindata);
            }
        }
        elseif(intval($i_id)==-1)////Deleting All
        {
            $s_qry="Update ".$this->tbl." Set i_is_deleted=1 ";
            $this->db->query($s_qry);
            $i_ret_=$this->db->affected_rows();
            if($i_ret_)
            {
                $logi["msg"]="Deleting all information from ".$this->tbl." ";
                $logi["sql"]= serialize(array($s_qry) ) ;
                $this->log_info($logi);
                unset($logi,$logindata);
            }
        }
        unset($s_qry);
        return $i_ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/****
 * Registers an audit-trail entry for add, edit and delete operations,
 * attributed to the currently logged-in admin user.
 *
 * @param mixed $attr array with keys "msg" (required) and "sql" (optional)
 * @returns TRUE on success and FALSE if failed
 */
public function log_info($attr)
{
    try
    {
        $admin_session=$this->session->userdata("admin_loggedin");
        $s_actor_id=decrypt($admin_session["user_id"]);
        $s_sql_dump=($attr["sql"]?$attr["sql"]:"");
        return $this->write_log($attr["msg"],$s_actor_id,$s_sql_dump);
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/******
 * Fetches all controller-access records for a user type, keyed by
 * controller name.
 *
 * @param int $i_user_type_id user type whose access rows are fetched
 * @param string $s_controller currently unused; kept for backward
 *               compatibility with existing callers
 * @returns array map of controller name => access-row data
 */
public function fetch_controller_access($i_user_type_id=null,$s_controller=null)
{
    try
    {
        $ret_=array();
        /////////////////Define your query here/////////////
        $s_qry="Select uta.id,uta.i_user_type_id,uta.s_controller,uta.i_action_add,uta.i_action_edit,uta.i_action_delete,ut.s_user_type
            ,uta.dt_created_on,uta.i_is_deleted "
            ."From ".$this->db->USER_TYPE_ACCESS." uta "
            ."Left Join ".$this->db->USER_TYPE." ut On uta.i_user_type_id=ut.id "
            ." Where uta.i_user_type_id=?";
        /////////////////end Define your query here/////////////
        $this->db->trans_begin();///new
        $rs=$this->db->query($s_qry,array(intval($i_user_type_id)));
        if(is_array($rs->result()))
        {
            foreach($rs->result() as $row)
            {
                $ret_[$row->s_controller]["id"]=$row->id;////always integer
                $ret_[$row->s_controller]['controller']=get_unformatted_string($row->s_controller);
                $ret_[$row->s_controller]['i_action_add']=intval($row->i_action_add);
                $ret_[$row->s_controller]["i_action_edit"]=intval($row->i_action_edit);
                $ret_[$row->s_controller]["i_action_delete"]=intval($row->i_action_delete);
                $ret_[$row->s_controller]["i_user_type_id"]=intval($row->i_user_type_id);
                $ret_[$row->s_controller]["s_user_type"]=get_unformatted_string($row->s_user_type);
                $ret_[$row->s_controller]["dt_created_on"]=date($this->conf["site_date_format"],strtotime($row->dt_created_on));
                $ret_[$row->s_controller]["i_is_deleted"]=intval($row->i_is_deleted);
                $ret_[$row->s_controller]["s_is_deleted"]=(intval($row->i_is_deleted)==1?"Removed":"");
                // Bug fix: removed a stray "$i_cnt++;" here — the variable
                // was never initialized or read, producing an
                // undefined-variable notice on every row.
            }
            $rs->free_result();
        }
        $this->db->trans_commit();///new
        // Bug fix: the old unset() list referenced $i_id, which does not
        // exist in this method.
        unset($s_qry,$rs,$row);
        return $ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/***
 * Updates an existing user-type access row, or inserts a new one when no
 * row id is supplied. Each branch runs inside its own transaction, which
 * is committed only after the audit log entry is written.
 *
 * @param array $info expects keys i_action_add, i_action_edit,
 *              i_action_delete and (for insert) i_user_type_id,
 *              s_controller, dt_created_on
 * @param int $i_id access-row id to update; <=0 means insert a new row
 * @returns int rows affected (update) / new row id (insert); 0/FALSE if failed
 */
public function update_access($info,$i_id)
{
    try
    {
        $i_ret_=0;////Returns false
        if(!empty($info))
        {
            //check if row exist in access table
            // $where = "uta.i_user_type_id='".$i_user_type_id."' AND uta.s_controller='".$s_controller."' ";
            if(intval($i_id)>0)//update
            {
                $s_qry="UPDATE ".$this->db->USER_TYPE_ACCESS." SET ";
                $s_qry.=" i_action_add=? ";
                $s_qry.=", i_action_edit=? ";
                $s_qry.=", i_action_delete=? ";
                $s_qry.=" WHERE id=?";
                $this->db->trans_begin();///new
                $this->db->query($s_qry,array( intval($info["i_action_add"]),
                    intval($info["i_action_edit"]),
                    intval($info["i_action_delete"]),
                    intval($i_id)
                ));
                $i_ret_=$this->db->affected_rows();
                if($i_ret_)
                {
                    // Log, then commit; roll back when nothing was updated.
                    $logi["msg"]="Updating ".$this->tbl." ";
                    $logi["sql"]= serialize(array($s_qry,array( intval($info["i_action_add"]),
                        intval($info["i_action_edit"]),
                        intval($info["i_action_delete"]),
                        intval($i_id)
                    )) ) ;
                    $this->log_info($logi);
                    unset($logi);
                    $this->db->trans_commit();///new
                }
                else
                {
                    $this->db->trans_rollback();///new
                }
            }
            else //add new row
            {
                $s_qry="INSERT INTO ".$this->db->USER_TYPE_ACCESS
                    ." ( i_user_type_id,
                    s_controller,
                    i_action_add,
                    i_action_edit,
                    i_action_delete,
                    dt_created_on)
                    VALUES (?,?,?,?,?,?)";
                $this->db->trans_begin();///new
                $this->db->query($s_qry,array(intval($info["i_user_type_id"]),
                    ($info["s_controller"]),
                    intval($info["i_action_add"]),
                    intval($info["i_action_edit"]),
                    intval($info["i_action_delete"]),
                    date($this->conf["db_date_format"],strtotime($info["dt_created_on"]))
                ));
                $i_ret_=$this->db->insert_id();
                if($i_ret_)
                {
                    // Log, then commit; roll back when the insert failed.
                    $logi["msg"]="Inserting into ".$this->tbl." ";
                    $logi["sql"]= serialize(array($s_qry,array(intval($info["i_user_type_id"]),
                        ($info["s_controller"]),
                        intval($info["i_action_add"]),
                        intval($info["i_action_edit"]),
                        intval($info["i_action_delete"]),
                        date($this->conf["db_date_format"],strtotime($info["dt_created_on"]))
                    )) ) ;
                    $this->log_info($logi);
                    unset($logi);
                    $this->db->trans_commit();///new
                }
                else
                {
                    $this->db->trans_rollback();///new
                }
            }
            // //////////////////////
        }
        unset($s_qry);
        return $i_ret_;
    }
    catch(Exception $err_obj)
    {
        show_error($err_obj->getMessage());
    }
}
/****
* Fetch Menus having access rights to control them.
* Company controller cannot be edited or deleted because in some of php scripts
* the company id used are hardcodded.
*
* @param int $i_user_type_id, user type; 0=> super admin
* @returns array of menu controllers
*/
public function fetch_menus($i_user_type_id=null,$product_section=null)
{
try
{
if($product_section=="Finance")
$all_controllers=$this->db->FIN_CONTROLLER_NAME;
else
$all_controllers=$this->db->CONTROLLER_NAME;
/*For superadmin and others dont allow these controllers to be
* inserted,edited or deleted
* ex- Any user including super admin cannot edit or delete the company master
*/
$force_controller_toreact=array();
$force_controller_toreact=array(
'Auto_mail'=>array(
'action_add'=>0,
'action_edit'=>0,
'action_delete'=>0
),
'Manage_feedback'=>array(
'action_add'=>0,
'action_edit'=>1,
'action_delete'=>0
),
'Manage_jobs'=>array(
'action_add'=>0,
'action_edit'=>0,
'action_delete'=>1
),
'Manage_private_message'=>array(
'action_add'=>0,
'action_edit'=>0,
'action_delete'=>1
),
'Manage_tradesman'=>array(
'action_add'=>0,
'action_edit'=>1,
'action_delete'=>0
),
'Manage_buyers'=>array(
'action_add'=>0,
'action_edit'=>1,
'action_delete'=>0
),
'Manage_verification'=>array(
'action_add'=>1,
'action_edit'=>0,
'action_delete'=>0
),
'Newsletter_subscribers'=>array(
'action_add'=>1,
'action_edit'=>1,
'action_delete'=>0
),
'How_it_works_tradesman'=>array(
'action_add'=>0,
'action_edit'=>1,
'action_delete'=>1
),
'Manage_invoice'=>array(
'action_add'=>0,
'action_edit'=>0,
'action_delete'=>1
),
'Payment_subscription'=>array(
'action_add'=>1,
'action_edit'=>0,
'action_delete'=>0
),
'How_it_works_buyer'=>array(
'action_add'=>0,
'action_edit'=>1,
'action_delete'=>1
)
);
///////////end force to act///////
/**
* Alias Controllers:- when some controller depends on other controller
* then allow access to those controllers as well.
*/
$alias_controllers=array();
$controllers_access=array();
//pr($_SESSION);
//echo $i_user_type_id;exit;
if($i_user_type_id>0)
{
$s_qry="Select * From ".$this->db->USER_TYPE_ACCESS;
$s_qry.=" Where i_user_type_id=? AND (i_action_add=1 OR i_action_edit=1 OR i_action_delete=1 OR s_controller='Dashboard')";///Dashboard is allowed to all users
$this->db->trans_begin();///new
$rs=$this->db->query($s_qry, intval($i_user_type_id));
$i_cnt=0;
if(is_array($rs->result()) ) ///new
{
foreach($rs->result() as $row)
{
$tmp_=stripslashes($row->s_controller);
$controllers_access[$tmp_]=$all_controllers[$tmp_];
/*For superadmin and others dont allow these controllers to be
* inserted,edited or deleted
* ex- Any user including super admin cannot edit or delete the company master
*/
if(!$force_controller_toreact[$tmp_])
{
if($controllers_access[$tmp_]["top_menu"]!="Report")
{
$controllers_access[$tmp_]["action_add"]=intval($row->i_action_add);
$controllers_access[$tmp_]["action_edit"]=intval($row->i_action_edit);
$controllers_access[$tmp_]["action_delete"]=intval($row->i_action_delete);
}
else
{
$controllers_access[$tmp_]["action_add"]=0;
$controllers_access[$tmp_]["action_edit"]=0;
$controllers_access[$tmp_]["action_delete"]=0;
}
}///////end if
else
{
$controllers_access[$tmp_]["action_add"]=$force_controller_toreact[$tmp_]["action_add"];
$controllers_access[$tmp_]["action_edit"]=$force_controller_toreact[$tmp_]["action_edit"];
$controllers_access[$tmp_]["action_delete"]=$force_controller_toreact[$tmp_]["action_delete"];
}///end else
/**
* Alias Controllers:- when some controller depends on other controller
* then allow access to those controllers as well.
* Alias Controllers can be comma seperated Controllers names
*/
if(trim($controllers_access[$tmp_]["alias_controller"])!="")
{
$tmpAlis=explode(",",$controllers_access[$tmp_]["alias_controller"]);
if(is_array($tmpAlis))
{
foreach($tmpAlis as $ali)
{
$alias_name=$ali;
$alias_controllers[$alias_name]["action_add"]=0;
$alias_controllers[$alias_name]["action_edit"]=0;
$alias_controllers[$alias_name]["action_delete"]=0;
}
unset($ali);
}
else
{
$alias_name=$controllers_access[$tmp_]["alias_controller"];
$alias_controllers[$alias_name]["action_add"]=0;
$alias_controllers[$alias_name]["action_edit"]=0;
$alias_controllers[$alias_name]["action_delete"]=0;
}
unset($alias_name,$tmpAlis);
}///end if Alias controller
}
$rs->free_result();
}
$this->db->trans_commit();///new
unset($s_qry,$rs,$row,$i_cnt);
///////Keeping the access rights into the sessions/////
}
else////for super admin
{
foreach($all_controllers as $k=>$menus)
{
$tmp_=$k;
$controllers_access[$tmp_]=$all_controllers[$tmp_];
/*For superadmin and others dont allow these controllers to be
* inserted,edited or deleted
* ex- Any user including super admin cannot edit or delete the company master
*/
if(!$force_controller_toreact[$tmp_])
{
if($controllers_access[$tmp_]["top_menu"]!="Report")
{
$controllers_access[$tmp_]["action_add"]=1;
$controllers_access[$tmp_]["action_edit"]=1;
$controllers_access[$tmp_]["action_delete"]=1;
}
else
{
$controllers_access[$tmp_]["action_add"]=0;
$controllers_access[$tmp_]["action_edit"]=0;
$controllers_access[$tmp_]["action_delete"]=0;
}
}///////end if
else
{
$controllers_access[$tmp_]["action_add"]=$force_controller_toreact[$tmp_]["action_add"];
$controllers_access[$tmp_]["action_edit"]=$force_controller_toreact[$tmp_]["action_edit"];
$controllers_access[$tmp_]["action_delete"]=$force_controller_toreact[$tmp_]["action_delete"];
}///end else
/**
* Alias Controllers:- when some controller depends on other controller
* then allow access to those controllers as well.
*/
if(trim($controllers_access[$tmp_]["alias_controller"])!="")
{
$tmpAlis=explode(",",$controllers_access[$tmp_]["alias_controller"]);
if(is_array($tmpAlis))
{
foreach($tmpAlis as $ali)
{
$alias_name=$ali;
$alias_controllers[$alias_name]["action_add"]=0;
$alias_controllers[$alias_name]["action_edit"]=0;
$alias_controllers[$alias_name]["action_delete"]=0;
}
unset($ali);
}
else
{
$alias_name=$controllers_access[$tmp_]["alias_controller"];
$alias_controllers[$alias_name]["action_add"]=0;
$alias_controllers[$alias_name]["action_edit"]=0;
$alias_controllers[$alias_name]["action_delete"]=0;
}
unset($alias_name,$tmpAlis);
}///end if Alias controller
}
}
//$this->session->set_userdata(array("controllers_access"=>$controllers_access));
//pr($this->session->userdata("controllers_access"),1);
/**
* DEFAULT ACCESS TO DASHBOARD
* For a special case:-
* user had created but no access control assigned for that user type.
* then dashboard will have default access to any user who logged in
*/
$controllers_access["Dashboard"]=$all_controllers["Dashboard"];
$controllers_access["Dashboard"]["action_add"]=0;
$controllers_access["Dashboard"]["action_edit"]=1;
$controllers_access["Dashboard"]["action_delete"]=0;
/////end DEFAULT ACCESS TO DASHBOARD///
/**
* Alias Controllers:- when some controller depends on other controller
* then allow access to those controllers as well.
* Finally assigning all alias controllers to main controllers.
*/
if(!empty($alias_controllers))
{
foreach($alias_controllers as $al=>$arr)
{
if(!array_key_exists($al,$controllers_access))
{
$controllers_access[$al]=$arr;
}
}
unset($al,$arr);
}///end if Alias controller
//pr($controllers_access);
unset($all_controllers,$tmp_,$k,$alias_controllers);
return $controllers_access;
}
catch(Exception $err_obj)
{
show_error($err_obj->getMessage());
}
}
    /**
     * Destructor — nothing to release here; the model holds no resources of
     * its own (DB handles are managed by the framework).
     */
    public function __destruct()
    {}
}
///end of class
?> | mrinsss/Full-Repo | jobshoppa/system/application/models/user_type_model.php | PHP | apache-2.0 | 30,165 |
/*******************************************************************************
*
* Copyright FUJITSU LIMITED 2017
*
* Creation Date: 2012-6-11
*
*******************************************************************************/
package org.oscm.ui.dialog.classic.manageudas;
import java.util.List;
import org.oscm.ui.beans.UdaBean;
/**
* @author yuyin
*
*/
/**
 * View model backing the "manage UDA definitions" page. Holds the rows of the
 * subscription and customer attribute tables as well as the attribute
 * definition currently selected for editing and the one being newly created.
 *
 * @author yuyin
 */
public class ManageUdaDefinitionPage {
    // rows shown in the subscription attribute table
    private List<UdaDefinitionRowModel> subscriptionUdas;
    // rows shown in the customer attribute table
    private List<UdaDefinitionRowModel> customerUdas;
    // definition currently selected for editing (created lazily)
    private UdaDefinitionDetails currentUdaDefinition;
    // definition entered in the "create" dialog (created lazily)
    private UdaDefinitionDetails newUdaDefinition;
    // selects which table is active; compared against UdaBean.CUSTOMER
    private String udaType;

    /** @return the rows of the subscription attribute table */
    public List<UdaDefinitionRowModel> getSubscriptionUdas() {
        return subscriptionUdas;
    }

    /** @param subscriptionUdas rows to show in the subscription attribute table */
    public void setSubscriptionUdas(List<UdaDefinitionRowModel> subscriptionUdas) {
        this.subscriptionUdas = subscriptionUdas;
    }

    /** @return the rows of the customer attribute table */
    public List<UdaDefinitionRowModel> getCustomerUdas() {
        return customerUdas;
    }

    /** @param customerUdas rows to show in the customer attribute table */
    public void setCustomerUdas(List<UdaDefinitionRowModel> customerUdas) {
        this.customerUdas = customerUdas;
    }

    /**
     * Returns the definition currently being edited, lazily creating an empty
     * one so the page never has to deal with a null definition.
     *
     * @return the current UDA definition, never null
     */
    public UdaDefinitionDetails getCurrentUdaDefinition() {
        if (currentUdaDefinition == null) {
            currentUdaDefinition = new UdaDefinitionDetails();
        }
        return currentUdaDefinition;
    }

    /** @param currentUdaDefinition the definition to treat as currently edited */
    public void setCurrentUda(UdaDefinitionDetails currentUdaDefinition) {
        this.currentUdaDefinition = currentUdaDefinition;
    }

    /**
     * Returns the definition backing the "create" dialog, lazily creating an
     * empty one on first access.
     *
     * @return the new UDA definition, never null
     */
    public UdaDefinitionDetails getNewUdaDefinition() {
        if (newUdaDefinition == null) {
            newUdaDefinition = new UdaDefinitionDetails();
        }
        return newUdaDefinition;
    }

    /** @param newUdaDefinition the definition backing the "create" dialog */
    public void setNewUdaDefinition(UdaDefinitionDetails newUdaDefinition) {
        this.newUdaDefinition = newUdaDefinition;
    }

    /**
     * Sets the active UDA type and discards any previously entered "new"
     * definition so the create dialog always starts with a blank form.
     *
     * @param udaType the type marker to set
     */
    public void setUdaType(String udaType) {
        this.newUdaDefinition = null;
        this.udaType = udaType;
    }

    /** @return the active UDA type marker */
    public String getUdaType() {
        return udaType;
    }

    /**
     * Selects the UDA at the given index of the active table (customer or
     * subscription, depending on {@link #getUdaType()}) as the definition to
     * edit.
     *
     * @param index row index within the relevant table
     */
    public void setCurrentUdaIndex(int index) {
        final List<UdaDefinitionRowModel> rows =
                udaType.equals(UdaBean.CUSTOMER) ? customerUdas : subscriptionUdas;
        currentUdaDefinition = UdaModelConverter
                .convertUdaDefinitionRowModelToUdaDefDetails(rows.get(index));
    }
}
| opetrovski/development | oscm-portal/javasrc/org/oscm/ui/dialog/classic/manageudas/ManageUdaDefinitionPage.java | Java | apache-2.0 | 3,781 |
// (C) Copyright 2014 Hewlett Packard Enterprise Development LP
import React, { Component, PropTypes } from 'react';
import FormattedMessage from './FormattedMessage';
const CLASS_ROOT = "legend";
export default class Legend extends Component {
constructor(props) {
super(props);
this._onActive = this._onActive.bind(this);
this.state = {activeIndex: this.props.activeIndex};
}
componentWillReceiveProps (newProps) {
this.setState({activeIndex: newProps.activeIndex});
}
_onActive (index) {
this.setState({activeIndex: index});
if (this.props.onActive) {
this.props.onActive(index);
}
}
_itemColorIndex (item, index) {
return item.colorIndex || ('graph-' + (index + 1));
}
render () {
var classes = [CLASS_ROOT];
if (this.props.series.length === 1) {
classes.push(CLASS_ROOT + "--single");
}
if (this.props.className) {
classes.push(this.props.className);
}
var totalValue = 0;
var items = this.props.series.map(function (item, index) {
var legendClasses = [CLASS_ROOT + "__item"];
if (index === this.state.activeIndex) {
legendClasses.push(CLASS_ROOT + "__item--active");
}
if (item.onClick) {
legendClasses.push(CLASS_ROOT + "__item--clickable");
}
var colorIndex = this._itemColorIndex(item, index);
totalValue += item.value;
var valueClasses = [CLASS_ROOT + "__item-value"];
if (1 === this.props.series.length) {
valueClasses.push("large-number-font");
}
var swatch;
if (item.hasOwnProperty('colorIndex')) {
swatch = (
<svg className={CLASS_ROOT + "__item-swatch color-index-" + colorIndex}
viewBox="0 0 12 12">
<path className={item.className} d="M 5 0 l 0 12" />
</svg>
);
}
var label;
if (item.hasOwnProperty('label')) {
label = (
<span className={CLASS_ROOT + "__item-label"}>{item.label}</span>
);
}
var value;
if (item.hasOwnProperty('value')) {
value = (
<span className={valueClasses.join(' ')}>
{item.value}
<span className={CLASS_ROOT + "__item-units"}>
{item.units || this.props.units}
</span>
</span>
);
}
return (
<li key={item.label || index} className={legendClasses.join(' ')}
onClick={item.onClick}
onMouseOver={this._onActive.bind(this, index)}
onMouseOut={this._onActive.bind(this, null)} >
{swatch}
{label}
{value}
</li>
);
}, this);
// build legend from bottom to top, to align with Meter bar stacking
items.reverse();
var total = null;
if (this.props.total && this.props.series.length > 1) {
total = (
<li className={CLASS_ROOT + "__total"}>
<span className={CLASS_ROOT + "__total-label"}>
<FormattedMessage id="Total" defaultMessage="Total" />
</span>
<span className={CLASS_ROOT + "__total-value"}>
{totalValue}
<span className={CLASS_ROOT + "__total-units"}>{this.props.units}</span>
</span>
</li>
);
}
return (
<ol className={classes.join(' ')} role="presentation">
{items.reverse()}
{total}
</ol>
);
}
}
// Prop contract for Legend. `series` supplies the rendered items; `total`
// requests a summary row (only shown when more than one series entry exists);
// `units` is the fallback unit label when an item has none of its own.
Legend.propTypes = {
  activeIndex: PropTypes.number,
  onActive: PropTypes.func,
  series: PropTypes.arrayOf(PropTypes.shape({
    label: PropTypes.string,
    value: PropTypes.number,
    units: PropTypes.string,
    colorIndex: PropTypes.oneOfType([
      PropTypes.number, // 1-6
      PropTypes.string // status
    ]),
    onClick: PropTypes.func
  })).isRequired,
  total: PropTypes.bool,
  units: PropTypes.string,
  // NOTE(review): `value` is declared but not read anywhere in this component —
  // confirm whether external callers rely on it before removing.
  value: PropTypes.number
};
| samogami/grommet | src/js/components/Legend.js | JavaScript | apache-2.0 | 3,868 |
/* Copyright (c) 2015 Magnet Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.magnet.mmx.protocol;
/**
* @hide
* Common MMX element, namespace and attributes.
*/
public class Constants {
  /**
   * The default domain or service name for MMX server.
   */
  public final static String MMX_DOMAIN = "mmx";
  /**
   * The max length for topic name.
   */
  public final static int MMX_MAX_TOPIC_LEN = 50;
  // The max length for a channel name.
  public final static int MMX_MAX_CHANNEL_LEN = 50;
  /**
   * The max length for tag name.
   */
  public final static int MMX_MAX_TAG_LEN = 25;
  /**
   * The minimum user ID length for regular user account.
   */
  public final static int MMX_MIN_USERID_LEN = 5;
  /**
   * The maximum user ID length for regular user account.
   */
  public final static int MMX_MAX_USERID_LEN = 42;
  /**
   * The max payload size in bytes that MMX server supports. It is not
   * necessary same as what the client allows.
   */
  public final static int MAX_PAYLOAD_SIZE = 2 * 1024 * 1024;
  /**
   * The payload threshold in bytes to switch from RAM to memory-mapped I/O.
   */
  public final static int PAYLOAD_THRESHOLD = 102400;
  /**
   * A delimiter used in multi-tenant environment.
   */
  public final static char APP_ID_DELIMITER = '%';
  /**
   * A flag to control if '@' is allowed in the user ID.
   */
  public final static boolean AT_SIGN_DISALLOWED = false;
  /**
   * The current protocol major version number.
   */
  public final static int MMX_VERSION_MAJOR = 2;
  /**
   * The current protocol minor version number.
   */
  public final static int MMX_VERSION_MINOR = 1;
  /**
   * The elements for MMX.
   */
  public final static String MMX_ELEMENT = "mmx";
  // aliases of the base "mmx" element used by different request contexts
  public final static String MMX = MMX_ELEMENT;
  public final static String MMX_APP_REG = MMX_ELEMENT;
  public final static String MMX_DEV_REG = MMX_ELEMENT;
  // child element names within the mmx extension
  public final static String MMX_MMXMETA = "mmxmeta";
  public final static String MMX_META = "meta";
  public final static String MMX_PAYLOAD = "payload";
  /**
   * The default encoding type to be used for binary payload.
   */
  public final static String BASE64 = "base64";
  /**
   * The default message type if it is not specified or it is empty.
   */
  public final static String MMX_MTYPE_UNKNOWN = "unknown";
  /**
   * The message type for GeoLocaion.
   */
  public final static String MMX_MTYPE_GEOLOC = "geoloc";
  /**
   * The message type for MMXError.
   */
  public final static String MMX_MTYPE_ERROR = "mmxerror";
  // XEP-0184 message delivery receipts
  public final static String XMPP_REQUEST = "request";
  public final static String XMPP_RECEIVED = "received";
  public final static String XMPP_ATTR_ID = "id";
  public final static String XMPP_NS_RECEIPTS = "urn:xmpp:receipts";
  /**
   * The namespaces used in the MMX extension.
   */
  public final static String MMX_NS_APP = "com.magnet:appreg";
  public final static String MMX_NS_DEV = "com.magnet:dev";
  public final static String MMX_NS_USER = "com.magnet:user";
  public final static String MMX_NS_AUTH = "com.magnet:auth";
  public final static String MMX_NS_MSG_ACTION = "com.magnet:msg:action";
  public final static String MMX_NS_MSG_PAYLOAD = "com.magnet:msg:payload";
  public final static String MMX_NS_MSG_STATE = "com.magnet:msg:state";
  public final static String MMX_NS_MSG_ACK = "com.magnet:msg:ack";
  public final static String MMX_NS_MSG_PUSH = "com.magnet:msg:push";
  public final static String MMX_NS_MSG_WAKEUP = "com.magnet:msg:wakeup";
  public final static String MMX_NS_PUBSUB = "com.magnet:pubsub";
  public final static String MMX_NS_CONTEXT = "com.magnet:ctx";
  public static final String MMX_NS_MSG_SIGNAL = "com.magnet:msg:signal";
  // one-letter action codes carried in wake-up/push stanzas
  public final static String MMX_ACTION_CODE_WAKEUP = "w";
  public final static String MMX_ACTION_CODE_PUSH = "p";
  /**
   * The attributes used in the MMX extension.
   */
  public final static String MMX_ATTR_COMMAND = "command";
  public final static String MMX_ATTR_CTYPE = "ctype";
  public final static String MMX_ATTR_MTYPE = "mtype";
  public final static String MMX_ATTR_STAMP = "stamp";
  public final static String MMX_ATTR_CHUNK = "chunk";
  public final static String MMX_ATTR_CID = "cid";
  public final static String MMX_ATTR_DST = "dst";
  /**
   * User extended properties as metadata of UserCreate; this goes into the User tables
   */
  public final static String MMX_PROP_NAME_USER_GUEST_MODE = "guest";
  public final static String MMX_PROP_VALUE_USER_GUEST_TRUE = "true";
  public final static String MMX_PROP_VALUE_USER_GUEST_FALSE = "false";
  public final static String MMX_PROP_VALUE_USER_GUEST_REMOVE = "remove";
  public static enum UserCreateMode {
    /**
     * Anonymous user as guest.
     */
    GUEST, // create the user as a guest user
    /**
     * Regular authenticated user.
     */
    UPGRADE_USER // upgrade to real user; mark current logged in user as deactive if in guest mode
  }
  // character set name used for text payload encoding/decoding
  public final static String UTF8_CHARSET = "utf-8";
  // generic status codes mirroring HTTP semantics (OK / bad request / server error)
  public final static int STATUS_CODE_200 = 200;
  public final static int STATUS_CODE_400 = 400;
  public final static int STATUS_CODE_500 = 500;
  /**
   * Commands for device management.
   */
  public static enum DeviceCommand {
    REGISTER,
    UNREGISTER,
    QUERY,
    // tag operations below apply to the tags attached to a device
    GETTAGS,
    SETTAGS,
    ADDTAGS,
    REMOVETAGS,
  }
  /**
   * Commands for application management.
   */
  public static enum AppMgmtCommand {
    create,
    read,
    // read the requester's own applications only
    readMine,
    update,
    delete,
  }
  /**
   * Commands for account (user) management.
   */
  public static enum UserCommand {
    create,
    delete,
    query,
    get,
    list,
    search,
    update,
    // NOTE(review): presumably resets account credentials/state — confirm with server implementation
    reset,
    getTags,
    setTags,
    addTags,
    removeTags,
    searchByTags,
  }
  /**
   * Commands for wake-up messages.
   */
  public static enum PingPongCommand {
    /**
     * One way request without any response.
     */
    ping,
    /**
     * One way response from the two-way request.
     */
    pong,
    /**
     * Two-way request: one-way request and one-way response.
     */
    pingpong,
    /**
     * Send a notification using the Notification payload.
     */
    notify,
    /**
     * Wakeup the device and ask it to phone home
     */
    retrieve,
    /**
     * Pubsub wakeup with the PubSubNotification payload.
     */
    pubsub,
  }
  /**
   * Possible message states returned by the MessageManager.
   */
  public static enum MessageState {
    /**
     * The message is in an unknown state.
     */
    UNKNOWN,
    /**
     * client-only: the message has not been communicated MMX and can be cancelled.
     */
    CLIENT_PENDING,
    /**
     * Every message starts in this state
     */
    PENDING,
    /**
     * Multicast message has been submitted to the server.
     */
    SUBMITTED,
    /**
     * Message has been accepted and validated by the server.
     */
    ACCEPTED,
    /**
     * Recipient is offline and hence we need to send a wake-up notification
     */
    WAKEUP_REQUIRED,
    /**
     * Message wake up has been timed out
     */
    WAKEUP_TIMEDOUT,
    /**
     * We are waiting for recipient to wake up
     */
    WAKEUP_SENT,
    /**
     * Recipient is online and hence we transitioned to this
     * state
     */
    DELIVERY_ATTEMPTED,
    /**
     * XMPP packet has been delivered to the endpoint
     */
    DELIVERED,
    /**
     * Message has been processed by the endpoint
     */
    RECEIVED,
    /**
     * Timeout experienced by server when attempting delivery
     */
    TIMEDOUT,
  }
  /**
   * Commands for message management. The setEvents/getEvents/addEvents/removeEvents
   * are applicable to push messages in PushManager.
   */
  public static enum MessageCommand {
    query,
    ack,
    setTags,
    getTags,
    addTags,
    removeTags,
    setEvents,
    getEvents,
    addEvents,
    removeEvents,
  }
  /**
   * Commands for PubSub.
   */
  public static enum PubSubCommand {
    /**
     * Get the latest published items.
     */
    getlatest,
    /**
     * List all nodes under an app ID.
     */
    listtopics,
    /**
     * Create a topic.
     */
    createtopic,
    /**
     * Delete a topic.
     */
    deletetopic,
    /**
     * Get topic information by a topic ID.
     * @deprecated #getTopics
     */
    getTopic,
    /**
     * Get topic information by topic ID's.
     */
    getTopics,
    /**
     * Retract a published item or all items.
     */
    retract,
    /**
     * Retract all published items from a topic owned by a user.
     */
    retractall,
    /**
     * Subscribe to a topic.
     */
    subscribe,
    /**
     * Unsubscribe a subscription.
     */
    unsubscribe,
    /**
     * Unsubscribe all topics for a device.
     */
    unsubscribeForDev,
    /**
     * Get the summary of topics.
     */
    getSummary,
    /**
     * Get the tags
     */
    getTags,
    /**
     * Set the tags
     */
    setTags,
    /**
     * Add the tags
     */
    addTags,
    /**
     * Remove the tags
     */
    removeTags,
    /**
     * Query for topics
     * @deprecated Use {@link #searchTopic}
     */
    queryTopic,
    /**
     * Search for topics.
     */
    searchTopic,
    /*
     * Fetch published items
     */
    fetch,
    /**
     * Search topics by tags
     */
    searchByTags,
    /**
     * Get published items by item ID's
     */
    getItems,
    /**
     * Get all subscribers to a topic.
     */
    getSubscribers,
  }
  // constants used in top level push payloads
  // define it here for Android sharing and gson serialized names
  /**
   * Name of the push title
   */
  public static final String PAYLOAD_PUSH_TITLE = "title";
  /**
   * Name of the push body text
   */
  public static final String PAYLOAD_PUSH_BODY = "body";
  /**
   * Name of the icon
   */
  public static final String PAYLOAD_PUSH_ICON = "icon";
  /**
   * Name of the sound
   */
  public static final String PAYLOAD_PUSH_SOUND = "sound";
  /**
   * Name of the badge
   */
  public static final String PAYLOAD_PUSH_BADGE = "badge";
  //constants related to mmx dictionary in push/ping payloads
  /**
   * Name of the mmx dictionary element
   */
  public static final String PAYLOAD_MMX_KEY = "_mmx";
  /**
   * Key for the callback url value
   */
  public static final String PAYLOAD_CALLBACK_URL_KEY = "cu";
  /**
   * Key for the type value
   */
  public static final String PAYLOAD_TYPE_KEY = "ty";
  /**
   * Key for the id value
   */
  public static final String PAYLOAD_ID_KEY = "id";
  /**
   * Key for the custom dictionary
   */
  public static final String PAYLOAD_CUSTOM_KEY = "custom";
  /**
   * The display name for all-versions of Android topic.
   */
  public static final String MMX_TOPIC_ANDROID_ALL = "Android-All";
  /**
   * The display name for all versions of iOS topic.
   */
  public static final String MMX_TOPIC_IOS_ALL = "iOS-All";
  /**
   * A partial display name for user's geo-location topic.
   */
  public static final String MMX_TOPIC_GEOLOCATION = "GeoLocation";
  /**
   * A special address for MMX multicast. When a message has multiple
   * recipients, the message should be sent to this address.
   */
  public static final String MMX_MULTICAST = "mmx$multicast";
  /**
   * Keys for the unicast message server ack, multicast message acks.
   */
  public static final String SERVER_ACK_KEY = "serverack";
  public static final String BEGIN_ACK_KEY = "beginack";
  public static final String END_ACK_KEY = "endack";
  /**
   * Flag indicated if MMX is integrated with MMS.
   */
  public static final boolean MMS_INTEGRATION_ENABLED = true;
}
| magnetsystems/message-common | src/main/java/com/magnet/mmx/protocol/Constants.java | Java | apache-2.0 | 12,172 |
package cgeo.geocaching.filters.gui;
import cgeo.geocaching.R;
import cgeo.geocaching.activity.AbstractActionBarActivity;
import cgeo.geocaching.databinding.CacheFilterActivityBinding;
import cgeo.geocaching.databinding.CacheFilterListItemBinding;
import cgeo.geocaching.filters.core.AndGeocacheFilter;
import cgeo.geocaching.filters.core.BaseGeocacheFilter;
import cgeo.geocaching.filters.core.GeocacheFilter;
import cgeo.geocaching.filters.core.GeocacheFilterContext;
import cgeo.geocaching.filters.core.GeocacheFilterType;
import cgeo.geocaching.filters.core.IGeocacheFilter;
import cgeo.geocaching.filters.core.LogicalGeocacheFilter;
import cgeo.geocaching.filters.core.NotGeocacheFilter;
import cgeo.geocaching.filters.core.OrGeocacheFilter;
import cgeo.geocaching.models.Geocache;
import cgeo.geocaching.ui.TextParam;
import cgeo.geocaching.ui.TextSpinner;
import cgeo.geocaching.ui.ViewUtils;
import cgeo.geocaching.ui.dialog.Dialogs;
import cgeo.geocaching.ui.dialog.SimpleDialog;
import cgeo.geocaching.ui.recyclerview.ManagedListAdapter;
import cgeo.geocaching.utils.Log;
import cgeo.geocaching.utils.TextUtils;
import static cgeo.geocaching.filters.core.GeocacheFilterContext.FilterType.TRANSIENT;
import android.app.Activity;
import android.content.Intent;
import android.content.res.Configuration;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CheckBox;
import androidx.annotation.NonNull;
import androidx.recyclerview.widget.RecyclerView;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.jetbrains.annotations.NotNull;
/**
* Show a filter selection using an {@code ExpandableListView}.
*/
public class GeocacheFilterActivity extends AbstractActionBarActivity {
    // request code used when this activity is started for a result
    public static final int REQUEST_SELECT_FILTER = 456;
    // intent extra carrying the GeocacheFilterContext in and out of this activity
    public static final String EXTRA_FILTER_CONTEXT = "efc";
    // instance-state keys (see onSaveInstanceState/onRestoreInstanceState)
    private static final String STATE_CURRENT_FILTER = "state_current_filter";
    private static final String STATE_ADVANCED_VIEW = "state_advanced_view";
    private static final String STATE_FILTER_CONTEXT = "state_filter_context";
    private static final String STATE_ORIGINAL_FILTER_CONFIG = "state_original_filter_config";
    // filter types offered in "basic" mode (consumed by switchToBasic — TODO confirm, not visible here)
    private static final GeocacheFilterType[] BASIC_FILTER_TYPES =
            new GeocacheFilterType[]{GeocacheFilterType.TYPE, GeocacheFilterType.DIFFICULTY_TERRAIN, GeocacheFilterType.STATUS };
    private static final Set<GeocacheFilterType> BASIC_FILTER_TYPES_SET = new HashSet<>(Arrays.asList(BASIC_FILTER_TYPES));
    // filter types handled internally only; removed from the "add filter" spinner in initializeFilterAdd()
    private static final GeocacheFilterType[] INTERNAL_FILTER_TYPES =
            new GeocacheFilterType[]{GeocacheFilterType.DIFFICULTY, GeocacheFilterType.TERRAIN };
    private static final Set<GeocacheFilterType> INTERNAL_FILTER_TYPES_SET = new HashSet<>(Arrays.asList(INTERNAL_FILTER_TYPES));
    // context (filter type + current filter) being edited; defaults to TRANSIENT
    private GeocacheFilterContext filterContext = new GeocacheFilterContext(TRANSIENT);
    // config string of the filter when the activity was opened; used to detect unsaved changes
    private String originalFilterConfig;
    private CacheFilterActivityBinding binding;
    private FilterListAdapter filterListAdapter;
    // global filter options rendered as checkboxes (created in onCreate)
    private CheckBox andOrFilterCheckbox;
    private CheckBox inverseFilterCheckbox;
    private CheckBox includeInconclusiveFilterCheckbox;
    // spinner offering the selectable filter types for adding a new criterion
    private final TextSpinner<GeocacheFilterType> addFilter = new TextSpinner<>();
    @Override
    public void onCreate(final Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setThemeAndContentView(R.layout.cache_filter_activity);
        binding = CacheFilterActivityBinding.bind(findViewById(R.id.activity_viewroot));
        // build the three global option checkboxes programmatically
        binding.filterPropsCheckboxes.removeAllViews();
        this.andOrFilterCheckbox = ViewUtils.addCheckboxItem(this, binding.filterPropsCheckboxes, TextParam.id(R.string.cache_filter_option_and_or), R.drawable.ic_menu_logic);
        this.inverseFilterCheckbox = ViewUtils.addCheckboxItem(this, binding.filterPropsCheckboxes, TextParam.id(R.string.cache_filter_option_inverse), R.drawable.ic_menu_invert);
        this.includeInconclusiveFilterCheckbox = ViewUtils.addCheckboxItem(this, binding.filterPropsCheckboxes, TextParam.id(R.string.cache_filter_option_include_inconclusive), R.drawable.ic_menu_vague,
                TextParam.id(R.string.cache_filter_option_include_inconclusive_info));
        filterListAdapter = new FilterListAdapter(binding.filterList);
        initializeFilterAdd();
        initializeStorageOptions();
        // Get parameters from intent and basic cache information from database
        final Bundle extras = getIntent().getExtras();
        if (extras != null) {
            filterContext = extras.getParcelable(EXTRA_FILTER_CONTEXT);
        }
        if (filterContext == null) {
            filterContext = new GeocacheFilterContext(TRANSIENT);
        }
        setTitle(getString(filterContext.getType().titleId));
        fillViewFromFilter(filterContext.get().toConfig(), false);
        // remember the initial state so onBackPressed() can detect unsaved changes
        originalFilterConfig = getFilterFromView().toConfig();
        // basic/advanced toggle; switching back to basic may lose advanced-only
        // criteria, so ask for confirmation in that case
        this.binding.filterBasicAdvanced.setOnCheckedChangeListener((v, c) -> {
            if (c) {
                switchToAdvanced();
            } else if (isBasicPossibleWithoutLoss()) {
                switchToBasic();
            } else {
                SimpleDialog.of(this).setTitle(R.string.cache_filter_mode_basic_change_confirm_loss_title).setMessage(R.string.cache_filter_mode_basic_change_confirm_loss_message).confirm(
                        (vv, ii) -> switchToBasic(), (vv, ii) -> this.binding.filterBasicAdvanced.setChecked(true));
            }
        });
    }
    @Override
    public void onConfigurationChanged(@NonNull final Configuration newConfig) {
        // Plain delegation — no extra behavior here. NOTE(review): presumably
        // overridden so the activity handles configuration changes itself
        // instead of being recreated; confirm against the manifest entry.
        super.onConfigurationChanged(newConfig);
    }
    /**
     * Wires the two storage buttons: "save" (persist the current filter under a
     * user-chosen name, asking before overwriting a differing stored filter)
     * and "load/delete" (pick a stored filter to load, or delete one).
     */
    private void initializeStorageOptions() {
        //handling of "save" button
        binding.filterStorageSave.setOnClickListener(v -> {
            String filterName = binding.filterStorageName.getText().toString();
            // strip the trailing "*" marker from the displayed name
            if (filterName.endsWith("*")) {
                filterName = filterName.substring(0, filterName.length() - 1);
            }
            SimpleDialog.of(this).setTitle(R.string.cache_filter_storage_save_title)
                    .input(-1, filterName, null, null, newName -> {
                        final GeocacheFilter filter = getFilterFromView();
                        // warn before overwriting a stored filter with different content
                        if (GeocacheFilter.Storage.existsAndDiffers(newName, filter)) {
                            SimpleDialog.of(this).setTitle(R.string.cache_filter_storage_save_confirm_title).setMessage(R.string.cache_filter_storage_save_confirm_message, newName).confirm(
                                    (dialog, which) -> saveAs(newName));
                        } else {
                            saveAs(newName);
                        }
                    });
        });
        ViewUtils.setTooltip(binding.filterStorageSave, TextParam.id(R.string.cache_filter_storage_save_title));
        //handling of "load/delete" button
        binding.filterStorageManage.setOnClickListener(v -> {
            final List<GeocacheFilter> filters = new ArrayList<>(GeocacheFilter.Storage.getStoredFilters());
            if (filters.isEmpty()) {
                SimpleDialog.of(this).setTitle(R.string.cache_filter_storage_load_delete_title).setMessage(R.string.cache_filter_storage_load_delete_nofilter_message).show();
            } else {
                Dialogs.selectItemDialogWithAdditionalDeleteButton(this, R.string.cache_filter_storage_load_delete_title,
                        filters, (f) -> TextParam.text(f.getName()),
                        // select listener
                        (f) -> fillViewFromFilter(f.toConfig(), isAdvancedView()),
                        // delete listener
                        (f) -> SimpleDialog.of(this).setTitle(R.string.cache_filter_storage_delete_title)
                                .setMessage(R.string.cache_filter_storage_delete_message)
                                .confirm((dialog, which) -> {
                                    GeocacheFilter.Storage.delete(f);
                                    //if currently shown view was just deleted -> then delete it in view as well
                                    if (f.getName().contentEquals(binding.filterStorageName.getText())) {
                                        binding.filterStorageName.setText("");
                                    }
                                })
                );
            }
        });
        ViewUtils.setTooltip(binding.filterStorageManage, TextParam.id(R.string.cache_filter_storage_load_delete_title));
    }
private void saveAs(final String newName) {
binding.filterStorageName.setText(newName);
final GeocacheFilter filter = getFilterFromView();
GeocacheFilter.Storage.save(filter);
}
    @Override
    public void onSaveInstanceState(@NonNull final Bundle outState) {
        super.onSaveInstanceState(outState);
        // persist the complete editor state so it survives activity recreation;
        // restored symmetrically in onRestoreInstanceState()
        outState.putString(STATE_CURRENT_FILTER, getFilterFromView().toConfig());
        outState.putBoolean(STATE_ADVANCED_VIEW, isAdvancedView());
        outState.putParcelable(STATE_FILTER_CONTEXT, filterContext);
        outState.putString(STATE_ORIGINAL_FILTER_CONFIG, originalFilterConfig);
    }
@Override
protected void onRestoreInstanceState(@NonNull final Bundle savedInstanceState) {
super.onRestoreInstanceState(savedInstanceState);
if (savedInstanceState.getString(STATE_CURRENT_FILTER) != null) {
fillViewFromFilter(savedInstanceState.getString(STATE_CURRENT_FILTER), savedInstanceState.getBoolean(STATE_ADVANCED_VIEW));
}
filterContext = (GeocacheFilterContext) savedInstanceState.getSerializable(STATE_FILTER_CONTEXT);
if (filterContext == null) {
filterContext = new GeocacheFilterContext(TRANSIENT);
}
originalFilterConfig = savedInstanceState.getString(STATE_ORIGINAL_FILTER_CONFIG);
}
@Override
public boolean onCreateOptionsMenu(final Menu menu) {
getMenuInflater().inflate(R.menu.menu_ok_cancel, menu);
menu.findItem(R.id.menu_item_delete).setVisible(true);
return true;
}
@Override
public boolean onOptionsItemSelected(final MenuItem item) {
// Handle presses on the action bar items
final int itemId = item.getItemId();
if (itemId == R.id.menu_item_delete) {
clearView();
return true;
} else if (itemId == R.id.menu_item_save) {
finishWithResult();
return true;
} else if (itemId == R.id.menu_item_cancel) {
finish();
return true;
} else if (itemId == android.R.id.home) {
onBackPressed();
return true;
}
return false;
}
    /**
     * Populates all views from the given filter configuration string.
     *
     * @param inputFilter filter config to parse; null or unparseable input leaves
     *                    the criteria list untouched (views are still reset)
     * @param forceAdvanced when true the advanced view is kept even if the filter
     *                      could be shown in basic mode without loss
     */
    private void fillViewFromFilter(final String inputFilter, final boolean forceAdvanced) {
        includeInconclusiveFilterCheckbox.setChecked(false);
        inverseFilterCheckbox.setChecked(false);
        andOrFilterCheckbox.setChecked(false);
        boolean setAdvanced = false;
        if (inputFilter != null) {
            try {
                final List<IFilterViewHolder<?>> filterList = new ArrayList<>();
                final GeocacheFilter filter = GeocacheFilter.checkConfig(inputFilter);
                binding.filterStorageName.setText(filter.getNameForUserDisplay());
                includeInconclusiveFilterCheckbox.setChecked(filter.isIncludeInconclusive());
                setAdvanced = filter.isOpenInAdvancedMode();
                IGeocacheFilter filterTree = filter.getTree();
                // a top-level NOT maps to the "inverse" checkbox
                if (filterTree instanceof NotGeocacheFilter) {
                    inverseFilterCheckbox.setChecked(true);
                    filterTree = filterTree.getChildren().get(0);
                }
                // a logical root (AND/OR) maps to the and/or checkbox; its
                // children become the individual criteria rows
                if (filterTree instanceof LogicalGeocacheFilter) {
                    andOrFilterCheckbox.setChecked(filterTree instanceof OrGeocacheFilter);
                    for (IGeocacheFilter c : filterTree.getChildren()) {
                        filterList.add(FilterViewHolderCreator.createFor(c, this));
                    }
                }
                // a single base filter becomes a single criteria row
                if (filterTree instanceof BaseGeocacheFilter) {
                    filterList.add(FilterViewHolderCreator.createFor(filterTree, this));
                }
                filterListAdapter.setItems(filterList);
                adjustFilterEmptyView();
                //filterListAdapter.submitList(filterList, this::adjustFilterEmptyView);
            } catch (ParseException pe) {
                Log.w("Exception parsing input filter", pe);
            }
        }
        //set basic/advanced switch
        if (!forceAdvanced && !setAdvanced && isBasicPossibleWithoutLoss()) {
            this.binding.filterBasicAdvanced.setChecked(false);
            switchToBasic();
        } else {
            this.binding.filterBasicAdvanced.setChecked(true);
            switchToAdvanced();
        }
    }
    /**
     * Configures the "add filter" spinner: offers every filter type except the
     * internal-only ones, sorted by display name and grouped, and prepends a
     * new criteria row when a type is selected.
     */
    private void initializeFilterAdd() {
        final List<GeocacheFilterType> filterTypes = new ArrayList<>(Arrays.asList(GeocacheFilterType.values()));
        // internal types (difficulty/terrain) are not offered individually
        filterTypes.removeAll(INTERNAL_FILTER_TYPES_SET);
        Collections.sort(filterTypes, (left, right) -> TextUtils.COLLATOR.compare(left.getUserDisplayableName(), right.getUserDisplayableName()));
        addFilter.setValues(filterTypes)
                .setDisplayMapper(GeocacheFilterType::getUserDisplayableName)
                .setTextHideSelectionMarker(true)
                .setView(binding.filterAdditem, (v, t) -> { })
                .setTextGroupMapper(GeocacheFilterType::getUserDisplayableGroup)
                .setChangeListener(gcf -> {
                    // insert the new criterion at the top and scroll to it
                    filterListAdapter.addItem(0, FilterViewHolderCreator.createFor(gcf, this));
                    binding.filterList.smoothScrollToPosition(0);
                    adjustFilterEmptyView();
                }, false);
    }
private void adjustFilterEmptyView() {
final boolean listIsEmpty = filterListAdapter.getItemCount() == 0;
binding.filterList.setVisibility(listIsEmpty ? View.GONE : View.VISIBLE);
binding.filterListEmpty.setVisibility(!listIsEmpty ? View.GONE : View.VISIBLE);
}
// Resets the dialog to a pristine state: removes all filter entries, unchecks
// every global option and clears the stored-filter name.
private void clearView() {
    filterListAdapter.clearList();
    andOrFilterCheckbox.setChecked(false);
    inverseFilterCheckbox.setChecked(false);
    includeInconclusiveFilterCheckbox.setChecked(false);
    binding.filterStorageName.setText("");
    // in basic mode the list must always contain the default basic filter rows,
    // so re-populate them after clearing
    if (!isAdvancedView()) {
        switchToBasic();
    }
    adjustFilterEmptyView();
}
// Stores the filter configured in this view into the filter context and hands
// it back to the calling activity via the result intent, then closes the activity.
private void finishWithResult() {
    final Intent resultIntent = new Intent();
    final GeocacheFilter newFilter = getFilterFromView();
    filterContext.set(newFilter);
    resultIntent.putExtra(EXTRA_FILTER_CONTEXT, filterContext);
    // release the cache-list statistics held statically for the filter views
    FilterViewHolderCreator.clearListInfo();
    setResult(Activity.RESULT_OK, resultIntent);
    finish();
}
// Asks for confirmation before discarding unsaved filter changes when the user
// navigates back; leaves immediately if nothing was changed.
@Override
public void onBackPressed() {
    final GeocacheFilter newFilter = getFilterFromView();
    // compare serialized configs rather than object identity
    final boolean filterWasChanged = !originalFilterConfig.equals(newFilter.toConfig());
    if (filterWasChanged) {
        SimpleDialog.of(this).setTitle(R.string.confirm_unsaved_changes_title).setMessage(R.string.confirm_discard_changes).confirm((dialog, which) -> finish());
    } else {
        finish();
    }
}
// Builds a GeocacheFilter from the current view state. The individual filter
// entries are combined with AND (or OR when the and/or checkbox is set); the
// whole expression is wrapped in a NOT filter when "inverse" is checked.
// With no entries present, a filter with a null tree is created.
@NotNull
private GeocacheFilter getFilterFromView() {
    IGeocacheFilter filter = null;
    if (filterListAdapter.getItemCount() > 0) {
        filter = andOrFilterCheckbox.isChecked() ? new OrGeocacheFilter() : new AndGeocacheFilter();
        for (IFilterViewHolder<?> f : filterListAdapter.getItems()) {
            filter.addChild(FilterViewHolderCreator.createFrom(f));
        }
        if (inverseFilterCheckbox.isChecked()) {
            final IGeocacheFilter notFilter = new NotGeocacheFilter();
            notFilter.addChild(filter);
            filter = notFilter;
        }
    }
    return GeocacheFilter.create(
        binding.filterStorageName.getText().toString(),
        binding.filterBasicAdvanced.isChecked(),
        this.includeInconclusiveFilterCheckbox.isChecked(),
        filter);
}
// Launches this activity so the user can edit the given filter context.
// The currently filtered cache list (possibly incomplete, see isComplete) is
// passed statically so filter views can display value statistics; the result
// is delivered to the caller under request code REQUEST_SELECT_FILTER.
public static void selectFilter(@NonNull final Activity context, final GeocacheFilterContext filterContext,
                                final Collection<Geocache> filteredList, final boolean isComplete) {
    final Intent intent = new Intent(context, GeocacheFilterActivity.class);
    intent.putExtra(EXTRA_FILTER_CONTEXT, filterContext);
    FilterViewHolderCreator.setListInfo(filteredList, isComplete);
    context.startActivityForResult(intent, REQUEST_SELECT_FILTER);
}
// Returns true if the currently configured filter can be shown in the basic
// view without losing information: no stored-filter name, none of the global
// options (inverse / or / inconclusive) set, only basic filter types, each
// type at most once, and any status filter representable in its simple form.
private boolean isBasicPossibleWithoutLoss() {
    if (!StringUtils.isBlank(binding.filterStorageName.getText()) ||
        this.inverseFilterCheckbox.isChecked() ||
        this.andOrFilterCheckbox.isChecked() ||
        this.includeInconclusiveFilterCheckbox.isChecked()) {
        return false;
    }
    // track types already seen to reject duplicates
    final Set<GeocacheFilterType> found = new HashSet<>();
    for (IFilterViewHolder<?> fvh : filterListAdapter.getItems()) {
        if (!BASIC_FILTER_TYPES_SET.contains(fvh.getType()) || found.contains(fvh.getType())) {
            return false;
        }
        if (fvh.isOf(StatusFilterViewHolder.class) && !fvh.castTo(StatusFilterViewHolder.class).canBeSimplifiedLossless()) {
            return false;
        }
        found.add(fvh.getType());
    }
    return true;
}
// Switches the dialog to "advanced" mode: shows the storage/options/add-item
// controls, drops filter rows that are not actively filtering, and enables
// per-row controls (delete / drag) on the remaining rows.
private void switchToAdvanced() {
    this.binding.filterBasicAdvanced.setChecked(true);
    this.binding.filterStorageOptions.setVisibility(View.VISIBLE);
    this.binding.filterStorageOptionsLine.setVisibility(View.VISIBLE);
    this.binding.filterPropsCheckboxes.setVisibility(View.VISIBLE);
    this.binding.filterPropsCheckboxesLine.setVisibility(View.VISIBLE);
    this.binding.filterAdditem.setVisibility(View.VISIBLE);
    // start with the highest index, as we will remove all filters which are not actively filtering
    for (int pos = filterListAdapter.getItemCount() - 1; pos >= 0; pos--) {
        final ItemHolder itemHolder = (ItemHolder) this.binding.filterList.findViewHolderForLayoutPosition(pos);
        // itemHolder may be null for rows not currently laid out; those are left untouched
        if (itemHolder != null) {
            if (!itemHolder.getFilterViewHolder().createFilterFromView().isFiltering()) {
                this.filterListAdapter.removeItem(pos);
                continue;
            }
            itemHolder.setControlsEnabled(true);
            // status filters get their full (non-simplified) view in advanced mode
            if (itemHolder.getFilterViewHolder().isOf(StatusFilterViewHolder.class)) {
                itemHolder.getFilterViewHolder().castTo(StatusFilterViewHolder.class).setSimpleView(false);
            }
        }
    }
    adjustFilterEmptyView();
}
// Switches the dialog to "basic" mode: hides the advanced controls, resets the
// global options, then normalizes the filter list so it contains exactly the
// BASIC_FILTER_TYPES in their canonical order (reusing existing rows of a
// matching type, creating missing ones, dropping any surplus rows).
private void switchToBasic() {
    this.binding.filterBasicAdvanced.setChecked(false);
    this.binding.filterStorageName.setText("");
    this.inverseFilterCheckbox.setChecked(false);
    this.andOrFilterCheckbox.setChecked(false);
    this.includeInconclusiveFilterCheckbox.setChecked(false);
    this.binding.filterStorageOptions.setVisibility(View.GONE);
    this.binding.filterStorageOptionsLine.setVisibility(View.GONE);
    this.binding.filterPropsCheckboxes.setVisibility(View.GONE);
    this.binding.filterPropsCheckboxesLine.setVisibility(View.GONE);
    this.binding.filterAdditem.setVisibility(View.GONE);
    // disable delete/drag on all rows that are currently laid out
    for (int pos = 0; pos < filterListAdapter.getItemCount(); pos++) {
        final ItemHolder itemHolder = (ItemHolder) this.binding.filterList.findViewHolderForLayoutPosition(pos);
        if (itemHolder != null) {
            itemHolder.setControlsEnabled(false);
        }
    }
    // bring the basic filter types to positions 0..n-1 in canonical order
    int startPos = 0;
    for (GeocacheFilterType type : BASIC_FILTER_TYPES) {
        boolean found = false;
        for (int pos = startPos; pos < this.filterListAdapter.getItemCount(); pos++) {
            final IFilterViewHolder<?> fvh = this.filterListAdapter.getItem(pos);
            if (fvh.getType() == type) {
                // move an existing row of this type up to its canonical slot
                if (pos > startPos) {
                    final IFilterViewHolder<?> item = this.filterListAdapter.removeItem(pos);
                    this.filterListAdapter.addItem(startPos, item);
                }
                final IFilterViewHolder<?> item = this.filterListAdapter.getItem(startPos);
                if (item.isOf(StatusFilterViewHolder.class)) {
                    item.castTo(StatusFilterViewHolder.class).setSimpleView(true);
                }
                found = true;
                break;
            }
        }
        if (!found) {
            // no row of this type yet -> create an empty one in its slot
            final IFilterViewHolder<?> item = FilterViewHolderCreator.createFor(type, this);
            if (item.isOf(StatusFilterViewHolder.class)) {
                item.castTo(StatusFilterViewHolder.class).setSimpleView(true);
            }
            this.filterListAdapter.addItem(startPos, item);
        }
        startPos++;
    }
    // drop everything beyond the basic slots (non-basic leftovers)
    while (this.filterListAdapter.getItemCount() > BASIC_FILTER_TYPES.length) {
        this.filterListAdapter.removeItem(this.filterListAdapter.getItemCount() - 1);
    }
    adjustFilterEmptyView();
}
// True while the dialog is in "advanced" mode (basic/advanced toggle checked).
private boolean isAdvancedView() {
    return this.binding.filterBasicAdvanced.isChecked();
}
// RecyclerView row holder for one filter entry. Hosts the type title, the
// type-specific filter view (provided by an IFilterViewHolder) and the
// delete/drag controls.
private static class ItemHolder extends RecyclerView.ViewHolder {
    private final CacheFilterListItemBinding binding;
    private IFilterViewHolder<?> filterViewHolder;

    ItemHolder(final View rowView) {
        super(rowView);
        binding = CacheFilterListItemBinding.bind(rowView);
    }

    // Binds a filter view holder to this row: sets the title and mounts the
    // filter-specific view into the row's insert point.
    public void setFilterViewHolder(final IFilterViewHolder<?> filterViewHolder) {
        this.filterViewHolder = filterViewHolder;
        this.binding.filterTitle.setText(this.filterViewHolder.getType().getUserDisplayableName());
        //create view
        final View view = filterViewHolder.getView();
        // insert into main view
        final ViewGroup insertPoint = this.binding.insertPoint;
        insertPoint.removeAllViews(); //views are reused, so make sure to cleanup
        // detach the view from a previous parent before re-adding it here
        if (view.getParent() != null) {
            ((ViewGroup) view.getParent()).removeAllViews();
        }
        insertPoint.addView(view);
    }

    public IFilterViewHolder<?> getFilterViewHolder() {
        return this.filterViewHolder;
    }

    // Shows/hides the per-row edit controls (used in advanced mode only).
    public void setControlsEnabled(final boolean enabled) {
        binding.filterDelete.setVisibility(enabled ? View.VISIBLE : View.GONE);
        binding.filterDrag.setVisibility(enabled ? View.VISIBLE : View.GONE);
    }
}
// List adapter for the filter rows, with drag&drop support. Each row wraps an
// IFilterViewHolder that renders the type-specific filter view.
private final class FilterListAdapter extends ManagedListAdapter<IFilterViewHolder<?>, ItemHolder> {

    private FilterListAdapter(final RecyclerView recyclerView) {
        super(new ManagedListAdapter.Config(recyclerView)
            .setNotifyOnPositionChange(true)
            .setSupportDragDrop(true));
    }

    // Mounts the given filter view holder into the row (no-op for null items).
    private void fillViewHolder(final ItemHolder holder, final IFilterViewHolder<?> filterViewHolder) {
        if (filterViewHolder == null) {
            return;
        }
        holder.setFilterViewHolder(filterViewHolder);
        setTheme();
    }

    @NonNull
    @Override
    public ItemHolder onCreateViewHolder(@NonNull final ViewGroup parent, final int viewType) {
        final View view = LayoutInflater.from(parent.getContext()).inflate(R.layout.cache_filter_list_item, parent, false);
        final ItemHolder viewHolder = new ItemHolder(view);
        // delete/drag are only visible in advanced mode
        viewHolder.setControlsEnabled(isAdvancedView());
        viewHolder.binding.filterDelete.setOnClickListener(v -> {
            removeItem(viewHolder.getBindingAdapterPosition());
            adjustFilterEmptyView();
        });
        registerStartDrag(viewHolder, viewHolder.binding.filterDrag);
        return viewHolder;
    }

    @Override
    public void onBindViewHolder(@NonNull final ItemHolder holder, final int position) {
        holder.setControlsEnabled(isAdvancedView());
        fillViewHolder(holder, getItem(position));
    }
}
// NOTE(review): this override only delegates to super and is therefore
// redundant; kept as an extension point — consider removing it.
@Override
public void onDestroy() {
    super.onDestroy();
}
}
| rsudev/c-geo-opensource | main/src/cgeo/geocaching/filters/gui/GeocacheFilterActivity.java | Java | apache-2.0 | 24,922 |
/*
* Copyright 2015 JAXIO http://www.jaxio.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jaxio.celerio.spi.example;
import com.jaxio.celerio.model.Entity;
import com.jaxio.celerio.spi.EntitySpi;
public class ExampleEntity implements EntitySpi {
private Entity entity;
@Override
public void init(Entity entity) {
this.entity = entity;
}
@Override
public String velocityVar() {
return "example";
}
@Override
public Object getTarget() {
return this;
}
public String getHello() {
return "Hello from ExampleEntity: this entity has " + entity.getCurrentAttributes().size() + " attributes";
}
} | jaxio/celerio | celerio-spi-example/src/main/java/com/jaxio/celerio/spi/example/ExampleEntity.java | Java | apache-2.0 | 1,208 |
package freeyourstuff.data.model;
import java.io.IOException;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
/**
 * JSON (de)serialization helper for {@code Item} instances based on a single
 * shared Jackson {@code ObjectMapper}.
 */
public class ItemMapper {

    // ObjectMapper is expensive to construct; declared final so the shared
    // instance cannot be swapped out at runtime (it is never reconfigured here).
    private static final ObjectMapper mapper = new ObjectMapper();

    /** Serializes the given item to its JSON string representation. */
    public static String mapToJson(Item item) throws JsonGenerationException, JsonMappingException, IOException {
        return mapper.writeValueAsString(item);
    }

    /** Deserializes the given JSON string into an {@code Item}. */
    public static Item mapToItem(String jsonString) throws JsonParseException, JsonMappingException, IOException {
        return mapper.readValue(jsonString, Item.class);
    }
}
| nicolaierbs/free-your-stuff-server | src/main/java/freeyourstuff/data/model/ItemMapper.java | Java | apache-2.0 | 684 |
// Generated from /POI/java/org/apache/poi/hpsf/MarkUnsupportedException.java
#include <org/apache/poi/hpsf/MarkUnsupportedException.hpp>
// Internal "no-init" constructor of the generated delegating-constructor
// pattern: runs static class initialization (clinit) without a ctor body.
poi::hpsf::MarkUnsupportedException::MarkUnsupportedException(const ::default_init_tag&)
    : super(*static_cast< ::default_init_tag* >(0))
{
    clinit();
}
// Default constructor: builds the uninitialized instance, then runs ctor().
poi::hpsf::MarkUnsupportedException::MarkUnsupportedException()
    : MarkUnsupportedException(*static_cast< ::default_init_tag* >(0))
{
    ctor();
}
// Constructor with an exception message.
poi::hpsf::MarkUnsupportedException::MarkUnsupportedException(::java::lang::String* msg)
    : MarkUnsupportedException(*static_cast< ::default_init_tag* >(0))
{
    ctor(msg);
}
// Constructor with a causing throwable.
poi::hpsf::MarkUnsupportedException::MarkUnsupportedException(::java::lang::Throwable* reason)
    : MarkUnsupportedException(*static_cast< ::default_init_tag* >(0))
{
    ctor(reason);
}
// Constructor with both a message and a causing throwable.
poi::hpsf::MarkUnsupportedException::MarkUnsupportedException(::java::lang::String* msg, ::java::lang::Throwable* reason)
    : MarkUnsupportedException(*static_cast< ::default_init_tag* >(0))
{
    ctor(msg,reason);
}
// Java-style constructor body: forwards to the base-class default constructor.
void poi::hpsf::MarkUnsupportedException::ctor()
{
    super::ctor();
}
// Java-style constructor body: forwards the message to the base class.
void poi::hpsf::MarkUnsupportedException::ctor(::java::lang::String* msg)
{
    super::ctor(msg);
}
// Java-style constructor body: forwards the cause to the base class.
void poi::hpsf::MarkUnsupportedException::ctor(::java::lang::Throwable* reason)
{
    super::ctor(reason);
}
// Java-style constructor body: forwards message and cause to the base class.
void poi::hpsf::MarkUnsupportedException::ctor(::java::lang::String* msg, ::java::lang::Throwable* reason)
{
    super::ctor(msg, reason);
}
extern java::lang::Class *class_(const char16_t *c, int n);
// Returns the (lazily created, cached) java.lang.Class object for this type.
java::lang::Class* poi::hpsf::MarkUnsupportedException::class_()
{
    static ::java::lang::Class* c = ::class_(u"org.apache.poi.hpsf.MarkUnsupportedException", 44);
    return c;
}
// Virtual hook returning this type's Class object (used by getClass()).
java::lang::Class* poi::hpsf::MarkUnsupportedException::getClass0()
{
    return class_();
}
| pebble2015/cpoi | src/org/apache/poi/hpsf/MarkUnsupportedException.cpp | C++ | apache-2.0 | 1,808 |
// Data-layer handle (see ./db for the connection/ODM wrapper).
const db = require('./db');

// Schema for a user-owned resource document.
// NOTE(review): `indexed` looks like it may be a typo for Mongoose's `index`
// option — verify against the db wrapper before relying on that index.
const schema = new db.Schema({
  owner: { type: String, indexed: true, required: true }, // user.id
  name: { type: String, required: true },
  path: { type: String, unique: true, required: true },
  info: db.Schema.Types.Mixed,
  content: db.Schema.Types.Mixed
}, { timestamps: db.timestamps });
module.exports = db.model('Resource', schema); | arthurmilliken/gateway | models/resource.js | JavaScript | apache-2.0 | 374 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.memory;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import io.airlift.stats.TestingGcMonitor;
import io.airlift.units.DataSize;
import io.prestosql.Session;
import io.prestosql.execution.buffer.TestingPagesSerdeFactory;
import io.prestosql.memory.context.LocalMemoryContext;
import io.prestosql.operator.Driver;
import io.prestosql.operator.DriverContext;
import io.prestosql.operator.Operator;
import io.prestosql.operator.OperatorContext;
import io.prestosql.operator.OutputFactory;
import io.prestosql.operator.TableScanOperator;
import io.prestosql.operator.TaskContext;
import io.prestosql.plugin.tpch.TpchConnectorFactory;
import io.prestosql.spi.Page;
import io.prestosql.spi.QueryId;
import io.prestosql.spi.memory.MemoryPoolId;
import io.prestosql.spiller.SpillSpaceTracker;
import io.prestosql.sql.planner.plan.PlanNodeId;
import io.prestosql.testing.LocalQueryRunner;
import io.prestosql.testing.PageConsumerOperator.PageConsumerOutputFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import static com.google.common.base.Preconditions.checkState;
import static io.airlift.units.DataSize.Unit.BYTE;
import static io.airlift.units.DataSize.Unit.GIGABYTE;
import static io.airlift.units.DataSize.Unit.MEGABYTE;
import static io.prestosql.testing.LocalQueryRunner.queryRunnerWithInitialTransaction;
import static io.prestosql.testing.TestingSession.testSessionBuilder;
import static io.prestosql.testing.TestingTaskContext.createTaskContext;
import static java.lang.String.format;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
@Test(singleThreaded = true)
public class TestMemoryPools
{
private static final DataSize TEN_MEGABYTES = new DataSize(10, MEGABYTE);
// two bytes short of the pool size, used to nearly fill the pool
private static final DataSize TEN_MEGABYTES_WITHOUT_TWO_BYTES = new DataSize(TEN_MEGABYTES.toBytes() - 2, BYTE);
private static final DataSize ONE_BYTE = new DataSize(1, BYTE);

// per-test fixtures, (re)created by setUp(...) and released in tearDown()
private QueryId fakeQueryId;
private LocalQueryRunner localQueryRunner;
private MemoryPool userPool;
private List<Driver> drivers;
private TaskContext taskContext;
// Builds the shared test fixture: a tpch-backed local query runner, a 10MB
// user memory pool and a task context, then creates the drivers under test
// via the given supplier. Must only be called once per test (guarded below).
private void setUp(Supplier<List<Driver>> driversSupplier)
{
    checkState(localQueryRunner == null, "Already set up");

    Session session = testSessionBuilder()
            .setCatalog("tpch")
            .setSchema("tiny")
            .setSystemProperty("task_default_concurrency", "1")
            .build();

    localQueryRunner = queryRunnerWithInitialTransaction(session);

    // add tpch
    localQueryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());
    userPool = new MemoryPool(new MemoryPoolId("test"), TEN_MEGABYTES);
    fakeQueryId = new QueryId("fake");
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(new DataSize(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(new QueryId("query"),
            TEN_MEGABYTES,
            new DataSize(20, MEGABYTE),
            userPool,
            new TestingGcMonitor(),
            localQueryRunner.getExecutor(),
            localQueryRunner.getScheduler(),
            TEN_MEGABYTES,
            spillSpaceTracker);
    taskContext = createTaskContext(queryContext, localQueryRunner.getExecutor(), session);
    drivers = driversSupplier.get();
}
// Sets up drivers for a memory-hungry join query whose output is discarded;
// used by tests that need drivers competing for user memory.
private void setUpCountStarFromOrdersWithJoin()
{
    // query will reserve all memory in the user pool and discard the output
    setUp(() -> {
        OutputFactory outputFactory = new PageConsumerOutputFactory(types -> (page -> {}));
        return localQueryRunner.createDrivers("SELECT COUNT(*) FROM orders JOIN lineitem ON CAST(orders.orderkey AS VARCHAR) = CAST(lineitem.orderkey AS VARCHAR)", outputFactory, taskContext);
    });
}
// Sets up a single driver consisting of a RevocableMemoryOperator (which
// reserves `reservedPerPage` revocable bytes per produced page, for
// `numberOfPages` pages) feeding a discarding output operator.
// Returns the revocable operator so tests can trigger memory revocation.
private RevocableMemoryOperator setupConsumeRevocableMemory(DataSize reservedPerPage, long numberOfPages)
{
    AtomicReference<RevocableMemoryOperator> createOperator = new AtomicReference<>();
    setUp(() -> {
        DriverContext driverContext = taskContext.addPipelineContext(0, false, false, false).addDriverContext();
        OperatorContext revokableOperatorContext = driverContext.addOperatorContext(
                Integer.MAX_VALUE,
                new PlanNodeId("revokable_operator"),
                TableScanOperator.class.getSimpleName());

        OutputFactory outputFactory = new PageConsumerOutputFactory(types -> (page -> {}));
        Operator outputOperator = outputFactory.createOutputOperator(2, new PlanNodeId("output"), ImmutableList.of(), Function.identity(), new TestingPagesSerdeFactory()).createOperator(driverContext);

        RevocableMemoryOperator revocableMemoryOperator = new RevocableMemoryOperator(revokableOperatorContext, reservedPerPage, numberOfPages);
        createOperator.set(revocableMemoryOperator);

        Driver driver = Driver.createDriver(driverContext, revocableMemoryOperator, outputOperator);
        return ImmutableList.of(driver);
    });
    return createOperator.get();
}
// Releases the query runner after each test; resetting the field to null also
// re-arms the "Already set up" guard in setUp(...).
@AfterMethod(alwaysRun = true)
public void tearDown()
{
    if (localQueryRunner != null) {
        localQueryRunner.close();
        localQueryRunner = null;
    }
}
// Drivers must block once the user pool is exhausted and resume when the
// externally-held reservation is freed again.
@Test
public void testBlockingOnUserMemory()
{
    setUpCountStarFromOrdersWithJoin();
    // grab the whole pool so the query's own reservations must block
    assertTrue(userPool.tryReserve(fakeQueryId, "test", TEN_MEGABYTES.toBytes()));
    runDriversUntilBlocked(waitingForUserMemory());
    assertTrue(userPool.getFreeBytes() <= 0, format("Expected empty pool but got [%d]", userPool.getFreeBytes()));
    userPool.free(fakeQueryId, "test", TEN_MEGABYTES.toBytes());
    assertDriversProgress(waitingForUserMemory());
}
// A registered pool listener must be notified with the pool and its current
// reservation whenever memory is reserved.
@Test
public void testNotifyListenerOnMemoryReserved()
{
    setupConsumeRevocableMemory(ONE_BYTE, 10);
    AtomicReference<MemoryPool> notifiedPool = new AtomicReference<>();
    AtomicLong notifiedBytes = new AtomicLong();
    userPool.addListener(MemoryPoolListener.onMemoryReserved(pool -> {
        notifiedPool.set(pool);
        notifiedBytes.set(pool.getReservedBytes());
    }));

    userPool.reserve(fakeQueryId, "test", 3);
    assertEquals(notifiedPool.get(), userPool);
    assertEquals(notifiedBytes.get(), 3L);
}
// Futures returned by reserve(...) must not be cancellable; they complete
// only when the requested memory becomes available.
@Test
public void testMemoryFutureCancellation()
{
    setUpCountStarFromOrdersWithJoin();
    ListenableFuture<?> future = userPool.reserve(fakeQueryId, "test", TEN_MEGABYTES.toBytes());
    assertTrue(!future.isDone());
    try {
        future.cancel(true);
        fail("cancel should fail");
    }
    catch (UnsupportedOperationException e) {
        assertEquals(e.getMessage(), "cancellation is not supported");
    }
    // freeing the pool must complete the pending future
    userPool.free(fakeQueryId, "test", TEN_MEGABYTES.toBytes());
    assertTrue(future.isDone());
}
// A revocable-memory consumer must block when the pool runs out and make
// exactly as much progress as bytes are freed from the user reservation.
@Test
public void testBlockingOnRevocableMemoryFreeUser()
{
    setupConsumeRevocableMemory(ONE_BYTE, 10);
    assertTrue(userPool.tryReserve(fakeQueryId, "test", TEN_MEGABYTES_WITHOUT_TWO_BYTES.toBytes()));

    // we expect 2 iterations as we have 2 bytes remaining in memory pool and we allocate 1 byte per page
    assertEquals(runDriversUntilBlocked(waitingForRevocableSystemMemory()), 2);
    assertTrue(userPool.getFreeBytes() <= 0, format("Expected empty pool but got [%d]", userPool.getFreeBytes()));

    // lets free 5 bytes
    userPool.free(fakeQueryId, "test", 5);
    assertEquals(runDriversUntilBlocked(waitingForRevocableSystemMemory()), 5);
    assertTrue(userPool.getFreeBytes() <= 0, format("Expected empty pool but got [%d]", userPool.getFreeBytes()));

    // 3 more bytes is enough for driver to finish
    userPool.free(fakeQueryId, "test", 3);
    assertDriversProgress(waitingForRevocableSystemMemory());
    assertEquals(userPool.getFreeBytes(), 10);
}
// A blocked revocable-memory consumer must resume when memory revoking is
// requested on its operator (freeing its own revocable reservation).
@Test
public void testBlockingOnRevocableMemoryFreeViaRevoke()
{
    RevocableMemoryOperator revocableMemoryOperator = setupConsumeRevocableMemory(ONE_BYTE, 5);
    assertTrue(userPool.tryReserve(fakeQueryId, "test", TEN_MEGABYTES_WITHOUT_TWO_BYTES.toBytes()));

    // we expect 2 iterations as we have 2 bytes remaining in memory pool and we allocate 1 byte per page
    assertEquals(runDriversUntilBlocked(waitingForRevocableSystemMemory()), 2);
    revocableMemoryOperator.getOperatorContext().requestMemoryRevoking();

    // 2 more iterations
    assertEquals(runDriversUntilBlocked(waitingForRevocableSystemMemory()), 2);
    revocableMemoryOperator.getOperatorContext().requestMemoryRevoking();

    // 3 more bytes is enough for driver to finish
    assertDriversProgress(waitingForRevocableSystemMemory());
    assertEquals(userPool.getFreeBytes(), 2);
}
// Per-query tagged allocation accounting: reservations and frees must be
// tracked per tag, and fully-freed queries must disappear from the map.
@Test
public void testTaggedAllocations()
{
    QueryId testQuery = new QueryId("test_query");
    MemoryPool testPool = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));

    testPool.reserve(testQuery, "test_tag", 10);

    Map<String, Long> allocations = testPool.getTaggedMemoryAllocations().get(testQuery);
    assertEquals(allocations, ImmutableMap.of("test_tag", 10L));

    // free 5 bytes for test_tag
    testPool.free(testQuery, "test_tag", 5);
    assertEquals(allocations, ImmutableMap.of("test_tag", 5L));

    testPool.reserve(testQuery, "test_tag2", 20);
    assertEquals(allocations, ImmutableMap.of("test_tag", 5L, "test_tag2", 20L));

    // free the remaining 5 bytes for test_tag
    testPool.free(testQuery, "test_tag", 5);
    assertEquals(allocations, ImmutableMap.of("test_tag2", 20L));

    // free all for test_tag2
    testPool.free(testQuery, "test_tag2", 20);
    assertEquals(testPool.getTaggedMemoryAllocations().size(), 0);
}
// Moving a query between pools must transfer its tagged allocations and its
// reserved bytes from the source pool to the target pool.
@Test
public void testMoveQuery()
{
    QueryId testQuery = new QueryId("test_query");
    MemoryPool pool1 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));
    MemoryPool pool2 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));
    pool1.reserve(testQuery, "test_tag", 10);

    Map<String, Long> allocations = pool1.getTaggedMemoryAllocations().get(testQuery);
    assertEquals(allocations, ImmutableMap.of("test_tag", 10L));

    pool1.moveQuery(testQuery, pool2);
    assertNull(pool1.getTaggedMemoryAllocations().get(testQuery));
    allocations = pool2.getTaggedMemoryAllocations().get(testQuery);
    assertEquals(allocations, ImmutableMap.of("test_tag", 10L));

    assertEquals(pool1.getFreeBytes(), 1000);
    assertEquals(pool2.getFreeBytes(), 990);

    // free under the same tag the reservation was made with ("test_tag"); the
    // original used the tag "test", which would corrupt the tagged accounting
    pool2.free(testQuery, "test_tag", 10);
    assertEquals(pool2.getFreeBytes(), 1000);
}
// Moving a query that has no reservations must be a harmless no-op on both pools.
@Test
public void testMoveUnknownQuery()
{
    QueryId testQuery = new QueryId("test_query");
    MemoryPool pool1 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));
    MemoryPool pool2 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));

    assertNull(pool1.getTaggedMemoryAllocations().get(testQuery));

    pool1.moveQuery(testQuery, pool2);
    assertNull(pool1.getTaggedMemoryAllocations().get(testQuery));
    assertNull(pool2.getTaggedMemoryAllocations().get(testQuery));
}
// Processes all drivers in a loop until some operator blocks for the given
// reason; returns the number of loop iterations executed. Asserts that no
// driver has finished when blocking is reached.
private long runDriversUntilBlocked(Predicate<OperatorContext> reason)
{
    long iterationsCount = 0;

    // run driver, until it blocks
    while (!isOperatorBlocked(drivers, reason)) {
        for (Driver driver : drivers) {
            driver.process();
        }
        iterationsCount++;
    }

    // driver should be blocked waiting for memory
    for (Driver driver : drivers) {
        assertFalse(driver.isFinished());
    }
    return iterationsCount;
}
// Runs all drivers to completion, asserting that none ever blocks for the
// given reason and that at least one driver makes progress on every pass.
private void assertDriversProgress(Predicate<OperatorContext> reason)
{
    do {
        assertFalse(isOperatorBlocked(drivers, reason));
        boolean progress = false;
        for (Driver driver : drivers) {
            ListenableFuture<?> blocked = driver.process();
            // non-short-circuit `|` is intentional here: process() must run for every driver
            progress = progress | blocked.isDone();
        }
        // query should not block
        assertTrue(progress);
    }
    while (!drivers.stream().allMatch(Driver::isFinished));
}
// Blocking reason: the operator is waiting for user memory to become available.
private Predicate<OperatorContext> waitingForUserMemory()
{
    return operatorContext -> {
        return !operatorContext.isWaitingForMemory().isDone();
    };
}
// Blocking reason: the operator is waiting for revocable memory and has not
// (yet) been asked to revoke memory itself.
private Predicate<OperatorContext> waitingForRevocableSystemMemory()
{
    return (OperatorContext operatorContext) ->
            !operatorContext.isWaitingForRevocableMemory().isDone() &&
            !operatorContext.isMemoryRevokingRequested();
}
// True when any operator of any driver currently matches the given blocking
// reason (evaluation stops at the first match, as anyMatch short-circuits).
private static boolean isOperatorBlocked(List<Driver> drivers, Predicate<OperatorContext> reason)
{
    return drivers.stream()
            .flatMap(driver -> driver.getDriverContext().getOperatorContexts().stream())
            .anyMatch(reason::apply);
}
// Test operator that produces `numberOfPages` dummy pages, growing its
// revocable memory reservation by `reservedPerPage` with each page.
// Revoking (or finishing) drops the reservation back to zero.
private static class RevocableMemoryOperator
        implements Operator
{
    private final DataSize reservedPerPage;
    private final long numberOfPages;
    private final OperatorContext operatorContext;
    // number of pages emitted so far; operator is finished once it reaches numberOfPages
    private long producedPagesCount;
    private final LocalMemoryContext revocableMemoryContext;

    public RevocableMemoryOperator(OperatorContext operatorContext, DataSize reservedPerPage, long numberOfPages)
    {
        this.operatorContext = operatorContext;
        this.reservedPerPage = reservedPerPage;
        this.numberOfPages = numberOfPages;
        this.revocableMemoryContext = operatorContext.localRevocableMemoryContext();
    }

    @Override
    public ListenableFuture<?> startMemoryRevoke()
    {
        // revocation needs no async work; complete immediately
        return Futures.immediateFuture(null);
    }

    @Override
    public void finishMemoryRevoke()
    {
        // give back the entire revocable reservation
        revocableMemoryContext.setBytes(0);
    }

    @Override
    public OperatorContext getOperatorContext()
    {
        return operatorContext;
    }

    @Override
    public void finish()
    {
        revocableMemoryContext.setBytes(0);
    }

    @Override
    public boolean isFinished()
    {
        return producedPagesCount >= numberOfPages;
    }

    @Override
    public boolean needsInput()
    {
        // source operator: never accepts input
        return false;
    }

    @Override
    public void addInput(Page page)
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public Page getOutput()
    {
        // grow the revocable reservation by one page's worth before emitting
        revocableMemoryContext.setBytes(revocableMemoryContext.getBytes() + reservedPerPage.toBytes());
        producedPagesCount++;
        if (producedPagesCount == numberOfPages) {
            finish();
        }
        return new Page(10);
    }
}
}
| wyukawa/presto | presto-main/src/test/java/io/prestosql/memory/TestMemoryPools.java | Java | apache-2.0 | 16,621 |
# Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudferry import model
from cloudferry.model import identity
@model.type_alias('volume_attachments')
class Attachment(model.Model):
    """Relation between a compute server and a volume attached to it."""

    object_id = model.PrimaryKey()
    # server reference may point at a not-yet-migrated object (ensure_existence=False)
    server = model.Reference('cloudferry.model.compute.Server',
                             ensure_existence=False)
    volume = model.Dependency('cloudferry.model.storage.Volume')
    device = model.String(required=True)

    def equals(self, other):
        """Two attachments are equal if the base model says so, or if they
        attach to equal servers under the same device name.

        NOTE(review): when ``self.server`` is None the method returns False even
        if ``other.server`` is also None — confirm that is intended.
        """
        # pylint: disable=no-member
        if super(Attachment, self).equals(other):
            return True
        if self.server is None:
            return False
        return self.server.equals(other.server) and self.device == other.device
@model.type_alias('volumes')
class Volume(model.Model):
    """Block-storage volume owned by a tenant."""

    object_id = model.PrimaryKey()
    # name/description may legitimately be unset (allow_none=True)
    name = model.String(required=True, allow_none=True)
    description = model.String(required=True, allow_none=True)
    availability_zone = model.String(required=True)
    encrypted = model.Boolean(missing=False)
    host = model.String(required=True)
    size = model.Integer(required=True)
    tenant = model.Dependency(identity.Tenant, required=True)
    metadata = model.Dict(missing=dict)
    volume_type = model.String(required=True, allow_none=True)
| SVilgelm/CloudFerry | cloudferry/model/storage.py | Python | apache-2.0 | 1,801 |
/*
* (C) Copyright 2016 Richard Ballard.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.richardballard.packplanner.item.order;
import com.github.richardballard.packplanner.item.Item;
import com.google.common.collect.Ordering;
import org.jetbrains.annotations.NotNull;
import org.testng.annotations.Test;
import java.util.Comparator;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
// Tests that ComparatorFromSortOrderFunction maps each SortOrder to a
// comparator with the expected ordering semantics over Item lengths.
@Test
public class ComparatorFromSortOrderFunctionTest {

    // Creates a mock item reporting the given length in millimetres.
    @NotNull
    private Item getItem(final int lengthMm) {
        final Item item = mock(Item.class);
        when(item.getLengthMm())
            .thenReturn(lengthMm);
        return item;
    }

    // NOTE(review): NATURAL yields 1 for both argument orders of distinct
    // items (i.e. "second is greater" regardless of length) — apparently a
    // keep-existing-order comparator; confirm against the SortOrder docs.
    public void naturalGivesSecondGreaterComparator() {
        final Comparator<? super Item> comparator = new ComparatorFromSortOrderFunction().apply(SortOrder.NATURAL);

        final Item itemA = getItem(2);
        final Item itemB = getItem(4);

        assertThat(comparator.compare(itemA, itemB))
            .isEqualTo(1);
        assertThat(comparator.compare(itemB, itemA))
            .isEqualTo(1);
        assertThat(comparator.compare(itemA, itemA))
            .isEqualTo(0);
    }

    // SHORT_TO_LONG: ascending by length.
    public void shortToLongGivesAppropriateComparator() {
        final Ordering<? super Item> ordering
            = Ordering.from(new ComparatorFromSortOrderFunction().apply(SortOrder.SHORT_TO_LONG));

        final Item itemA = getItem(2);
        final Item itemB = getItem(4);

        assertThat(ordering.min(itemA, itemB))
            .isEqualTo(itemA);
        assertThat(ordering.max(itemA, itemB))
            .isEqualTo(itemB);
        assertThat(ordering.compare(itemA, itemA))
            .isEqualTo(0);
    }

    // LONG_TO_SHORT: descending by length.
    public void longToShortGivesAppropriateComparator() {
        final Ordering<? super Item> ordering
            = Ordering.from(new ComparatorFromSortOrderFunction().apply(SortOrder.LONG_TO_SHORT));

        final Item itemA = getItem(4);
        final Item itemB = getItem(2);

        assertThat(ordering.min(itemA, itemB))
            .isEqualTo(itemA);
        assertThat(ordering.max(itemA, itemB))
            .isEqualTo(itemB);
        assertThat(ordering.compare(itemA, itemA))
            .isEqualTo(0);
    }
}
| Richard-Ballard/pack-planner | src/test/java/com/github/richardballard/packplanner/item/order/ComparatorFromSortOrderFunctionTest.java | Java | apache-2.0 | 2,881 |
import Vue, { ComponentOptions, PluginFunction, AsyncComponent } from "vue";
// Any value usable as a routed component: options object, Vue class, or async factory.
type Component = ComponentOptions<Vue> | typeof Vue | AsyncComponent;
// String-keyed map helper.
type Dictionary<T> = { [key: string]: T };
type ErrorHandler = (err: Error) => void;

// History implementation used by the router.
export type RouterMode = "hash" | "history" | "abstract";
// Navigation target: a path string or a location descriptor object.
export type RawLocation = string | Location;
// Redirect target, optionally computed from the route being matched.
export type RedirectOption = RawLocation | ((to: Route) => RawLocation);
// Navigation guard: must invoke `next` to continue, redirect or abort the navigation.
export type NavigationGuard<V extends Vue = Vue> = (
  to: Route,
  from: Route,
  next: (to?: RawLocation | false | ((vm: V) => any) | void) => void
) => any
/** The router instance. Install with `Vue.use(VueRouter)` via `install`. */
export declare class VueRouter {
constructor (options?: RouterOptions);

/** Root Vue instance the router was injected into. */
app: Vue;
/** Active history mode. */
mode: RouterMode;
/** The currently active route (reactive). */
currentRoute: Route;

/** Registers a global before guard; the returned Function presumably
 *  unregisters the hook — confirm against vue-router docs. */
beforeEach (guard: NavigationGuard): Function;
/** Registers a global resolve guard (runs after in-component guards). */
beforeResolve (guard: NavigationGuard): Function;
/** Registers a global after hook (no `next`; cannot affect navigation). */
afterEach (hook: (to: Route, from: Route) => any): Function;
/** Navigates by pushing a new history entry. */
push (location: RawLocation, onComplete?: Function, onAbort?: ErrorHandler): void;
/** Navigates by replacing the current history entry. */
replace (location: RawLocation, onComplete?: Function, onAbort?: ErrorHandler): void;
/** Moves `n` steps in history (negative = back). */
go (n: number): void;
back (): void;
forward (): void;
/** Components matched by the given location (defaults to current route). */
getMatchedComponents (to?: RawLocation | Route): Component[];
/** Queues a callback for when the router has completed initial navigation. */
onReady (cb: Function, errorCb?: ErrorHandler): void;
/** Registers a callback for uncaught navigation errors. */
onError (cb: ErrorHandler): void;
/** Dynamically appends routes to the router's route table. */
addRoutes (routes: RouteConfig[]): void;
/** Resolves a raw location against an optional current route. */
resolve (to: RawLocation, current?: Route, append?: boolean): {
location: Location;
route: Route;
href: string;
// backwards compat
normalizedTo: Location;
resolved: Route;
};

/** Vue plugin entry point: `Vue.use(VueRouter)`. */
static install: PluginFunction<never>;
}
/** Scroll coordinates in pixels. */
type Position = { x: number, y: number };
/** Result of scrollBehavior: absolute coordinates, an element selector with
 *  optional offset, or void to keep the current scroll position. */
type PositionResult = Position | { selector: string, offset?: Position } | void;
/** Constructor options for VueRouter. */
export interface RouterOptions {
/** Initial route table. */
routes?: RouteConfig[];
/** History mode; implementations typically fall back per `fallback`. */
mode?: RouterMode;
/** Whether to fall back to hash mode when history mode is unsupported. */
fallback?: boolean;
/** Base URL of the app, prepended to all paths. */
base?: string;
/** CSS class applied to active router-links. */
linkActiveClass?: string;
/** CSS class applied to exactly-active router-links. */
linkExactActiveClass?: string;
/** Custom query-string parser (overrides the default). */
parseQuery?: (query: string) => Object;
/** Custom query-string serializer (overrides the default). */
stringifyQuery?: (query: Object) => string;
/** Controls scroll position on navigation; `savedPosition` is only present
 *  for popstate (back/forward) navigations. May return a Promise. */
scrollBehavior?: (
to: Route,
from: Route,
savedPosition: Position | void
) => PositionResult | Promise<PositionResult>;
}
/** Function form of route `props`: derives component props from the route. */
type RoutePropsFunction = (route: Route) => Object;

/** Options forwarded to the path-to-regexp library for path matching. */
export interface PathToRegexpOptions {
/** Case-sensitive matching. */
sensitive?: boolean;
/** Disallow a trailing slash on the match. */
strict?: boolean;
/** Require the match to reach the end of the string. */
end?: boolean;
}
/** One entry in the route table passed to the router. */
export interface RouteConfig {
/** Path pattern (path-to-regexp syntax). */
path: string;
/** Optional name for named-route navigation. */
name?: string;
/** Component rendered in the default router-view. */
component?: Component;
/** Named-view components, keyed by router-view name. */
components?: Dictionary<Component>;
/** Redirect target instead of rendering. */
redirect?: RedirectOption;
/** Alias path(s) that match this route. */
alias?: string | string[];
/** Nested routes rendered in this route's child router-view. */
children?: RouteConfig[];
/** Arbitrary metadata, exposed on matched routes. */
meta?: any;
/** Per-route guard run before entering. */
beforeEnter?: NavigationGuard;
/** Pass route params to the component as props (boolean/object/function). */
props?: boolean | Object | RoutePropsFunction;
caseSensitive?: boolean;
pathToRegexpOptions?: PathToRegexpOptions;
}
/** Internal, normalized form of a RouteConfig after route-table processing. */
export interface RouteRecord {
path: string;
/** Compiled matcher for `path`. */
regex: RegExp;
/** Always a named-view map here (single `component` is normalized in). */
components: Dictionary<Component>;
/** Live component instances currently rendered for this record. */
instances: Dictionary<Vue>;
name?: string;
/** Parent record for nested routes. */
parent?: RouteRecord;
redirect?: RedirectOption;
/** For alias records: the original path this record matches as. */
matchAs?: string;
meta: any;
/** NOTE: this signature differs from NavigationGuard — redirect and next are
 *  split into separate callbacks. */
beforeEnter?: (
route: Route,
redirect: (location: RawLocation) => void,
next: () => void
) => any;
/** Props config, possibly per named view. */
props: boolean | Object | RoutePropsFunction | Dictionary<boolean | Object | RoutePropsFunction>;
}
/** Object form of a navigation target (the non-string half of RawLocation). */
export interface Location {
/** Named-route target; takes precedence over `path` in vue-router. */
name?: string;
path?: string;
/** Fragment, including the leading `#`. */
hash?: string;
/** Query params; array values produce repeated keys. */
query?: Dictionary<string | (string | null)[] | null | undefined>;
/** Route params (used with `name`). */
params?: Dictionary<string>;
/** Append `path` to the current path instead of resolving absolutely. */
append?: boolean;
/** Replace the current history entry instead of pushing. */
replace?: boolean;
}
/** A resolved, immutable snapshot of the current navigation state. */
export interface Route {
path: string;
name?: string;
hash: string;
query: Dictionary<string | (string | null)[]>;
params: Dictionary<string>;
/** Full URL including query and hash. */
fullPath: string;
/** All nested route records matched, outermost first. */
matched: RouteRecord[];
/** Original path when this route was reached via a redirect. */
redirectedFrom?: string;
meta?: any;
}
| falost/falost.github.io | static/libs/vue-router/types/router.d.ts | TypeScript | apache-2.0 | 3,536 |
package org.pitest.mutationtest.build;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
import org.pitest.bytecode.analysis.ClassTree;
import org.pitest.mutationtest.engine.Mutater;
import org.pitest.mutationtest.engine.MutationDetails;
import java.util.Arrays;
import java.util.Collection;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.pitest.mutationtest.engine.MutationDetailsMother.aMutationDetail;
@RunWith(MockitoJUnitRunner.class)
public class CompoundMutationInterceptorTest {

  @Mock
  MutationInterceptor modifyChild;

  @Mock
  MutationInterceptor filterChild;

  @Mock
  MutationInterceptor otherChild;

  @Mock
  MutationInterceptor reportChild;

  @Mock
  MutationInterceptor cosmeticChild;

  @Mock
  Mutater mutater;

  CompoundMutationInterceptor testee;

  @Before
  public void setUp() {
    // Each child advertises a distinct type so the compound interceptor can
    // sort them into its fixed processing order.
    when(this.modifyChild.type()).thenReturn(InterceptorType.MODIFY);
    when(this.filterChild.type()).thenReturn(InterceptorType.FILTER);
    when(this.otherChild.type()).thenReturn(InterceptorType.OTHER);
    when(this.cosmeticChild.type()).thenReturn(InterceptorType.MODIFY_COSMETIC);
    when(this.reportChild.type()).thenReturn(InterceptorType.REPORT);
  }

  @Test
  public void shouldNotifyAllChildrenOfNewClass() {
    this.testee = new CompoundMutationInterceptor(Arrays.asList(this.modifyChild, this.filterChild));
    final ClassTree aClass = new ClassTree(null);

    this.testee.begin(aClass);

    verify(this.modifyChild).begin(aClass);
    verify(this.filterChild).begin(aClass);
  }

  @Test
  public void shouldFilterChildren() {
    this.testee = new CompoundMutationInterceptor(Arrays.asList(this.modifyChild, this.filterChild));
    final ClassTree aClass = new ClassTree(null);

    // Only the child accepted by the predicate should be notified.
    this.testee.filter(i -> i == modifyChild).begin(aClass);

    verify(this.modifyChild).begin(aClass);
    verify(this.filterChild, never()).begin(aClass);
  }

  @Test
  public void shouldChainModifiedMutantListsThroughChildrenInCorrectOrder() {
    // add out of order: the compound interceptor is expected to run children
    // in type order OTHER -> MODIFY -> FILTER -> MODIFY_COSMETIC -> REPORT
    this.testee = new CompoundMutationInterceptor(Arrays.asList(this.cosmeticChild, this.otherChild, this.modifyChild, this.reportChild, this.filterChild));

    final Collection<MutationDetails> original = aMutationDetail().build(1);
    final Collection<MutationDetails> modifyResult = aMutationDetail().build(2);
    final Collection<MutationDetails> filterResult = aMutationDetail().build(3);
    final Collection<MutationDetails> reportResult = aMutationDetail().build(3);
    final Collection<MutationDetails> cosmeticResult = aMutationDetail().build(3);
    final Collection<MutationDetails> otherResult = aMutationDetail().build(3);

    when(this.modifyChild.intercept(any(Collection.class), any(Mutater.class))).thenReturn(modifyResult);
    when(this.filterChild.intercept(any(Collection.class), any(Mutater.class))).thenReturn(filterResult);
    when(this.reportChild.intercept(any(Collection.class), any(Mutater.class))).thenReturn(reportResult);
    when(this.cosmeticChild.intercept(any(Collection.class), any(Mutater.class))).thenReturn(cosmeticResult);
    when(this.otherChild.intercept(any(Collection.class), any(Mutater.class))).thenReturn(otherResult);

    final Collection<MutationDetails> actual = this.testee.intercept(original, this.mutater);

    // The REPORT child runs last, so its output is the overall result.
    assertThat(actual).isEqualTo(reportResult);

    // Each child must receive the output of its predecessor in the chain.
    verify(this.otherChild).intercept(original, this.mutater);
    verify(this.modifyChild).intercept(otherResult, this.mutater);
    verify(this.filterChild).intercept(modifyResult, this.mutater);
    // BUG FIX: this previously verified against cosmeticResult (the cosmetic
    // child's own stubbed OUTPUT) rather than its input. The cosmetic child
    // receives the FILTER child's output. The old assertion only passed
    // because filterResult and cosmeticResult are built with identical
    // contents (both aMutationDetail().build(3)) and so compare equal.
    verify(this.cosmeticChild).intercept(filterResult, this.mutater);
    verify(this.reportChild).intercept(cosmeticResult, this.mutater);
  }

  @Test
  public void shouldNotifyAllChildrenOfEnd() {
    this.testee = new CompoundMutationInterceptor(Arrays.asList(this.modifyChild, this.filterChild));

    this.testee.end();

    verify(this.modifyChild).end();
    verify(this.filterChild).end();
  }
}
| hcoles/pitest | pitest-entry/src/test/java/org/pitest/mutationtest/build/CompoundMutationInterceptorTest.java | Java | apache-2.0 | 4,212 |
package io.quarkus.maven.it.verifier;
import org.apache.maven.shared.invoker.InvocationRequest;
import org.apache.maven.shared.invoker.InvocationResult;
import org.apache.maven.shared.utils.cli.CommandLineException;
/**
* Result of {@link MavenProcessInvoker#execute(InvocationRequest)}. It keeps a reference on the created process.
*
* @author <a href="http://escoffier.me">Clement Escoffier</a>
*/
public class MavenProcessInvocationResult implements InvocationResult {

    /** The forked Maven process, if one was successfully started. */
    private Process process;

    /** The launch failure, if the process could not be started. */
    private CommandLineException exception;

    /**
     * Destroys the underlying process, if any, and blocks until it has
     * terminated. No-op when no process was ever attached.
     */
    void destroy() {
        if (process == null) {
            return;
        }
        process.destroy();
        try {
            process.waitFor();
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    MavenProcessInvocationResult setProcess(Process process) {
        this.process = process;
        return this;
    }

    public MavenProcessInvocationResult setException(CommandLineException exception) {
        // Print the stack trace immediately to give some feedback early.
        // In IntelliJ, the used `mvn` executable is not "executable" by default
        // on Mac and probably Linux; you need to chmod +x the file.
        exception.printStackTrace();
        this.exception = exception;
        return this;
    }

    @Override
    public CommandLineException getExecutionException() {
        return exception;
    }

    /**
     * @return the exit value of the finished process
     * @throws IllegalStateException if no process was attached
     */
    @Override
    public int getExitCode() {
        if (process == null) {
            throw new IllegalStateException("No process");
        }
        return process.exitValue();
    }

    public Process getProcess() {
        return process;
    }
}
| quarkusio/quarkus | test-framework/maven/src/main/java/io/quarkus/maven/it/verifier/MavenProcessInvocationResult.java | Java | apache-2.0 | 1,770 |