code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
/* * Copyright 2009 Alin Dreghiciu. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * * See the License for the specific language governing permissions and * limitations under the License. */ package org.ops4j.pax.web.service.tomcat.internal; import org.osgi.framework.BundleActivator; import org.osgi.framework.BundleContext; /** * TODO Add JavaDoc. * * @author Achim Nierbeck * @since 4.0.0, Aug 31, 2013 */ public class CompositeActivator implements BundleActivator { private BundleActivator paxWebActivator; private BundleActivator tomcatActivator; public CompositeActivator() { paxWebActivator = new org.ops4j.pax.web.service.internal.Activator(); tomcatActivator = new Activator(); } @Override public void start(BundleContext bundleContext) throws Exception { tomcatActivator.start(bundleContext); paxWebActivator.start(bundleContext); } @Override public void stop(BundleContext bundleContext) throws Exception { paxWebActivator.stop(bundleContext); tomcatActivator.stop(bundleContext); } }
stsiano/org.ops4j.pax.web
pax-web-tomcat-bundle/src/main/java/org/ops4j/pax/web/service/tomcat/internal/CompositeActivator.java
Java
apache-2.0
1,440
package parallels import ( "archive/tar" "bytes" "fmt" "io/ioutil" "os" "path/filepath" "regexp" "runtime" "strconv" "strings" "time" "github.com/codegangsta/cli" "github.com/docker/machine/drivers" "github.com/docker/machine/log" "github.com/docker/machine/ssh" "github.com/docker/machine/state" "github.com/docker/machine/utils" ) const ( isoFilename = "boot2docker.iso" shareFolderName = "Users" shareFolderPath = "/Users" minDiskSize = 32 ) type Driver struct { *drivers.BaseDriver CPU int Memory int DiskSize int Boot2DockerURL string } func init() { drivers.Register("parallels", &drivers.RegisteredDriver{ New: NewDriver, GetCreateFlags: GetCreateFlags, }) } // GetCreateFlags registers the flags this driver adds to // "docker hosts create" func GetCreateFlags() []cli.Flag { return []cli.Flag{ cli.IntFlag{ EnvVar: "PARALLELS_MEMORY_SIZE", Name: "parallels-memory", Usage: "Size of memory for host in MB", Value: 1024, }, cli.IntFlag{ EnvVar: "PARALLELS_CPU_COUNT", Name: "parallels-cpu-count", Usage: "number of CPUs for the machine (-1 to use the number of CPUs available)", Value: 1, }, cli.IntFlag{ EnvVar: "PARALLELS_DISK_SIZE", Name: "parallels-disk-size", Usage: "Size of disk for host in MB", Value: 20000, }, cli.StringFlag{ EnvVar: "PARALLELS_BOOT2DOCKER_URL", Name: "parallels-boot2docker-url", Usage: "The URL of the boot2docker image. 
Defaults to the latest available version", Value: "", }, } } func NewDriver(machineName string, storePath string, caCert string, privateKey string) (drivers.Driver, error) { inner := drivers.NewBaseDriver(machineName, storePath, caCert, privateKey) return &Driver{BaseDriver: inner}, nil } func (d *Driver) GetSSHHostname() (string, error) { return d.GetIP() } func (d *Driver) GetSSHUsername() string { if d.SSHUser == "" { d.SSHUser = "docker" } return d.SSHUser } func (d *Driver) DriverName() string { return "parallels" } func (d *Driver) GetURL() (string, error) { ip, err := d.GetIP() if err != nil { return "", err } if ip == "" { return "", nil } return fmt.Sprintf("tcp://%s:2376", ip), nil } func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error { d.CPU = flags.Int("parallels-cpu-count") d.Memory = flags.Int("parallels-memory") d.DiskSize = flags.Int("parallels-disk-size") d.Boot2DockerURL = flags.String("parallels-boot2docker-url") d.SwarmMaster = flags.Bool("swarm-master") d.SwarmHost = flags.String("swarm-host") d.SwarmDiscovery = flags.String("swarm-discovery") d.SSHUser = "docker" d.SSHPort = 22 return nil } func (d *Driver) PreCreateCheck() error { // Check platform type if runtime.GOOS != "darwin" { return fmt.Errorf("Driver \"parallels\" works only on OS X!") } // Check Parallels Desktop version ver, err := d.getParallelsVersion() if err != nil { return err } if ver < 11 { return fmt.Errorf("Driver \"parallels\" supports only Parallels Desktop 11 and higher. You use: Parallels Desktop %d.", ver) } // Check Parallels Desktop edition edit, err := d.getParallelsEdition() if err != nil { return err } switch edit { case "pro", "business": break default: return fmt.Errorf("Docker Machine can be used only with Parallels Desktop Pro or Business edition. 
You use: %s edition", edit) } return nil } func (d *Driver) Create() error { var ( err error ) b2dutils := utils.NewB2dUtils("", "") if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil { return err } log.Infof("Creating SSH key...") if err := ssh.GenerateSSHKey(d.GetSSHKeyPath()); err != nil { return err } log.Infof("Creating Parallels Desktop VM...") if err := prlctl("create", d.MachineName, "--distribution", "boot2docker", "--dst", d.ResolveStorePath("."), "--no-hdd"); err != nil { return err } cpus := d.CPU if cpus < 1 { cpus = int(runtime.NumCPU()) } if cpus > 32 { cpus = 32 } if err := prlctl("set", d.MachineName, "--select-boot-device", "off", "--cpus", fmt.Sprintf("%d", cpus), "--memsize", fmt.Sprintf("%d", d.Memory), "--cpu-hotplug", "off", "--nested-virt", "on", "--pmu-virt", "on", "--on-window-close", "keep-running", "--longer-battery-life", "on", "--3d-accelerate", "off", "--device-bootorder", "cdrom0"); err != nil { return err } if err := prlctl("set", d.MachineName, "--device-set", "cdrom0", "--iface", "sata", "--position", "0", "--image", d.ResolveStorePath(isoFilename)); err != nil { return err } // Create a small plain disk. It will be converted and expanded later if err := prlctl("set", d.MachineName, "--device-add", "hdd", "--iface", "sata", "--position", "1", "--image", d.diskPath(), "--type", "plain", "--size", fmt.Sprintf("%d", minDiskSize)); err != nil { return err } if err := d.generateDiskImage(d.DiskSize); err != nil { return err } // Disable Time Sync feature because it has an issue with timezones. 
// TODO: Turn it back as soon as Time Sync is fixed in Parallels Tools if err := prlctl("set", d.MachineName, "--time-sync", "off"); err != nil { return err } // Enable headless mode if err := prlctl("set", d.MachineName, "--startup-view", "headless"); err != nil { return err } // Enable Shared Folders if err := prlctl("set", d.MachineName, "--shf-host", "on"); err != nil { return err } if err := prlctl("set", d.MachineName, "--shf-host-add", shareFolderName, "--path", shareFolderPath); err != nil { return err } log.Infof("Starting Parallels Desktop VM...") // Don't use Start() since it expects to have a dhcp lease already if err := prlctl("start", d.MachineName); err != nil { return err } var ip string log.Infof("Waiting for VM to come online...") for i := 1; i <= 60; i++ { ip, err = d.getIPfromDHCPLease() if err != nil { log.Debugf("Not there yet %d/%d, error: %s", i, 60, err) time.Sleep(2 * time.Second) continue } if ip != "" { log.Debugf("Got an ip: %s", ip) break } } if ip == "" { return fmt.Errorf("Machine didn't return an IP after 120 seconds, aborting") } d.IPAddress = ip if err := d.Start(); err != nil { return err } return nil } func (d *Driver) Start() error { s, err := d.GetState() if err != nil { return err } switch s { case state.Stopped, state.Saved, state.Paused: if err := prlctl("start", d.MachineName); err != nil { return err } log.Infof("Waiting for VM to start...") case state.Running: break default: log.Infof("VM not in restartable state") } if err := drivers.WaitForSSH(d); err != nil { return err } d.IPAddress, err = d.GetIP() if err != nil { return err } // Mount Share Folder if err := d.mountShareFolder(shareFolderName, shareFolderPath); err != nil { return err } return nil } func (d *Driver) Stop() error { if err := prlctl("stop", d.MachineName); err != nil { return err } for { s, err := d.GetState() if err != nil { return err } if s == state.Running { time.Sleep(1 * time.Second) } else { break } } return nil } func (d *Driver) Remove() 
error { s, err := d.GetState() if err != nil { if err == ErrMachineNotExist { log.Infof("machine does not exist, assuming it has been removed already") return nil } return err } if s == state.Running { if err := d.Kill(); err != nil { return err } } return prlctl("delete", d.MachineName) } func (d *Driver) Restart() error { if err := d.Stop(); err != nil { return err } return d.Start() } func (d *Driver) Kill() error { return prlctl("stop", d.MachineName, "--kill") } func (d *Driver) GetState() (state.State, error) { stdout, stderr, err := prlctlOutErr("list", d.MachineName, "--output", "status", "--no-header") if err != nil { if reMachineNotFound.FindString(stderr) != "" { return state.Error, ErrMachineNotExist } return state.Error, err } switch stdout { // TODO: state.Starting ?! case "running\n": return state.Running, nil case "paused\n": return state.Paused, nil case "suspended\n": return state.Saved, nil case "stopping\n": return state.Stopping, nil case "stopped\n": return state.Stopped, nil } return state.None, nil } func (d *Driver) GetIP() (string, error) { // Assume that Parallels Desktop hosts don't have IPs unless they are running s, err := d.GetState() if err != nil { return "", err } if s != state.Running { return "", drivers.ErrHostIsNotRunning } ip, err := d.getIPfromDHCPLease() if err != nil { return "", err } return ip, nil } // Detect Parallels Desktop major version func (d *Driver) getParallelsVersion() (int, error) { stdout, stderr, err := prlctlOutErr("--version") if err != nil { if err == ErrPrlctlNotFound { return 0, err } return 0, fmt.Errorf(string(stderr)) } // Parse Parallels Desktop version res := reMajorVersion.FindStringSubmatch(string(stdout)) if res == nil { return 0, fmt.Errorf("Parallels Desktop version could not be fetched: %s", stdout) } major_ver, err := strconv.Atoi(res[1]) if err != nil { return 0, err } return major_ver, nil } // Detect Parallels Desktop edition func (d *Driver) getParallelsEdition() (string, error) { 
stdout, stderr, err := prlsrvctlOutErr("info", "--license") if err != nil { if err == ErrPrlsrvctlNotFound { return "", err } return "", fmt.Errorf(string(stderr)) } // Parse Parallels Desktop version res := reParallelsEdition.FindStringSubmatch(string(stdout)) if res == nil { return "", fmt.Errorf("Parallels Desktop Edition could not be fetched!") } return res[1], nil } func (d *Driver) getIPfromDHCPLease() (string, error) { dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases" stdout, err := prlctlOut("list", "-i", d.MachineName) macRe := regexp.MustCompile("net0.* mac=([0-9A-F]{12}) card=.*") macMatch := macRe.FindAllStringSubmatch(stdout, 1) if len(macMatch) != 1 { return "", fmt.Errorf("MAC address for NIC: nic0 on Virtual Machine: %s not found!\n", d.MachineName) } mac := macMatch[0][1] if len(mac) != 12 { return "", fmt.Errorf("Not a valid MAC address: %s. It should be exactly 12 digits.", mac) } leases, err := ioutil.ReadFile(dhcp_lease_file) if err != nil { return "", err } ipRe := regexp.MustCompile("(.*)=\"(.*),(.*)," + strings.ToLower(mac) + ",.*\"") mostRecentIp := "" mostRecentLease := uint64(0) for _, l := range ipRe.FindAllStringSubmatch(string(leases), -1) { ip := l[1] expiry, _ := strconv.ParseUint(l[2], 10, 64) leaseTime, _ := strconv.ParseUint(l[3], 10, 32) log.Debugf("Found lease: %s for MAC: %s, expiring at %d, leased for %d s.\n", ip, mac, expiry, leaseTime) if mostRecentLease <= expiry-leaseTime { mostRecentIp = ip mostRecentLease = expiry - leaseTime } } if len(mostRecentIp) == 0 { return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, dhcp_lease_file) } log.Debugf("Found IP lease: %s for MAC address %s\n", mostRecentIp, mac) return mostRecentIp, nil } func (d *Driver) publicSSHKeyPath() string { return d.GetSSHKeyPath() + ".pub" } func (d *Driver) diskPath() string { return d.ResolveStorePath("disk.hdd") } func (d *Driver) mountShareFolder(shareName string, mountPoint string) error { cmd := "sudo 
mkdir -p " + mountPoint + " && sudo mount -t prl_fs " + shareName + " " + mountPoint if _, err := os.Stat(mountPoint); err != nil { if !os.IsNotExist(err) { return err } else { log.Infof("Host path '%s' does not exist. Skipping mount to VM...", mountPoint) } } else { if _, err := drivers.RunSSHCommandFromDriver(d, cmd); err != nil { return fmt.Errorf("Error mounting shared folder: %s", err) } } return nil } // Make a boot2docker VM disk image. func (d *Driver) generateDiskImage(size int) error { tarBuf, err := d.generateTar() if err != nil { return err } minSizeBytes := int64(minDiskSize) << 20 // usually won't fit in 32-bit int (max 2GB) //Expand the initial image if needed if bufLen := int64(tarBuf.Len()); bufLen > minSizeBytes { bufLenMBytes := bufLen>>20 + 1 if err := prldisktool("resize", "--hdd", d.diskPath(), "--size", fmt.Sprintf("%d", bufLenMBytes)); err != nil { return err } } // Find hds file hdsList, err := filepath.Glob(d.diskPath() + "/*.hds") if err != nil { return err } if len(hdsList) == 0 { return fmt.Errorf("Could not find *.hds image in %s", d.diskPath()) } hdsPath := hdsList[0] log.Debugf("HDS image path: %s", hdsPath) // Write tar to the hds file hds, err := os.OpenFile(hdsPath, os.O_WRONLY, 0644) if err != nil { return err } defer hds.Close() hds.Seek(0, os.SEEK_SET) _, err = hds.Write(tarBuf.Bytes()) if err != nil { return err } hds.Close() // Convert image to expanding type and resize it if err := prldisktool("convert", "--expanding", "--hdd", d.diskPath()); err != nil { return err } if err := prldisktool("resize", "--hdd", d.diskPath(), "--size", fmt.Sprintf("%d", size)); err != nil { return err } return nil } // See https://github.com/boot2docker/boot2docker/blob/master/rootfs/rootfs/etc/rc.d/automount func (d *Driver) generateTar() (*bytes.Buffer, error) { magicString := "boot2docker, please format-me" buf := new(bytes.Buffer) tw := tar.NewWriter(buf) // magicString first so the automount script knows to format the disk file := 
&tar.Header{Name: magicString, Size: int64(len(magicString))} if err := tw.WriteHeader(file); err != nil { return nil, err } if _, err := tw.Write([]byte(magicString)); err != nil { return nil, err } // .ssh/key.pub => authorized_keys file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700} if err := tw.WriteHeader(file); err != nil { return nil, err } pubKey, err := ioutil.ReadFile(d.publicSSHKeyPath()) if err != nil { return nil, err } file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644} if err := tw.WriteHeader(file); err != nil { return nil, err } if _, err := tw.Write([]byte(pubKey)); err != nil { return nil, err } file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644} if err := tw.WriteHeader(file); err != nil { return nil, err } if _, err := tw.Write([]byte(pubKey)); err != nil { return nil, err } if err := tw.Close(); err != nil { return nil, err } return buf, nil }
Kast0rTr0y/docker-machine
drivers/parallels/parallels.go
GO
apache-2.0
14,524
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.harmony.unpack200; import java.util.ArrayList; /** * An IcTuple is the set of information that describes an inner class. * * C is the fully qualified class name<br> * F is the flags<br> * C2 is the outer class name, or null if it can be inferred from C<br> * N is the inner class name, or null if it can be inferred from C<br> */ public class IcTuple { private final int cIndex; private final int c2Index; private final int nIndex; private final int tIndex; /** * * @param C * @param F * @param C2 * @param N * @param cIndex the index of C in cpClass * @param c2Index the index of C2 in cpClass, or -1 if C2 is null * @param nIndex the index of N in cpUTF8, or -1 if N is null */ public IcTuple(String C, int F, String C2, String N, int cIndex, int c2Index, int nIndex, int tIndex) { this.C = C; this.F = F; this.C2 = C2; this.N = N; this.cIndex = cIndex; this.c2Index = c2Index; this.nIndex = nIndex; this.tIndex = tIndex; if (null == N) { predictSimple = true; } if (null == C2) { predictOuter = true; } initializeClassStrings(); } public static final int NESTED_CLASS_FLAG = 0x00010000; protected String C; // this class protected int F; // flags protected String C2; // outer class protected 
String N; // name private boolean predictSimple; private boolean predictOuter; private String cachedOuterClassString; private String cachedSimpleClassName; private boolean initialized; private boolean anonymous; private boolean outerIsAnonymous; private boolean member = true; private int cachedOuterClassIndex = -1; private int cachedSimpleClassNameIndex = -1; /** * Answer true if the receiver is predicted; answer false if the receiver is * specified explicitly in the outer and name fields. */ public boolean predicted() { return predictOuter || predictSimple; } /** * Answer true if the receiver's bit 16 is set (indicating * that explicit outer class and name fields are set). * @return boolean */ public boolean nestedExplicitFlagSet() { return (F & NESTED_CLASS_FLAG) == NESTED_CLASS_FLAG; } /** * Break the receiver into components at $ boundaries. */ public String[] innerBreakAtDollar(String className) { ArrayList resultList = new ArrayList(); int start = 0; int index = 0; while (index < className.length()) { if (className.charAt(index) <= '$') { resultList.add(className.substring(start, index)); start = index + 1; } index++; if (index >= className.length()) { // Add the last element resultList.add(className.substring(start, className.length())); } } String[] result = new String[resultList.size()]; for (int i = 0; i < resultList.size(); i++) { result[i] = (String) resultList.get(i); } return result; } /** * Answer the outer class name for the receiver. This may either be * specified or inferred from inner class name. * * @return String name of outer class */ public String outerClassString() { return cachedOuterClassString; } /** * Answer the inner class name for the receiver. 
* * @return String name of inner class */ public String simpleClassName() { return cachedSimpleClassName; } /** * Answer the full name of the inner class represented by this tuple * (including its outer component) * * @return String full name of inner class */ public String thisClassString() { if (predicted()) { return C; } else { // TODO: this may not be right. What if I // get a class like Foo#Bar$Baz$Bug? return C2 + "$" + N; } } public boolean isMember() { return member; } public boolean isAnonymous() { return anonymous; } public boolean outerIsAnonymous() { return outerIsAnonymous; } private boolean computeOuterIsAnonymous() { String[] result = innerBreakAtDollar(cachedOuterClassString); if (result.length == 0) { throw new Error( "Should have an outer before checking if it's anonymous"); } for (int index = 0; index < result.length; index++) { if (isAllDigits(result[index])) { return true; } } return false; } private void initializeClassStrings() { if (initialized) { return; } initialized = true; if (!predictSimple) { cachedSimpleClassName = N; } if (!predictOuter) { cachedOuterClassString = C2; } // Class names must be calculated from // this class name. String nameComponents[] = innerBreakAtDollar(C); if (nameComponents.length == 0) { // Unable to predict outer class // throw new Error("Unable to predict outer class name: " + C); } if (nameComponents.length == 1) { // Unable to predict simple class name // throw new Error("Unable to predict inner class name: " + C); } if (nameComponents.length < 2) { // If we get here, we hope cachedSimpleClassName // and cachedOuterClassString were caught by the // predictSimple / predictOuter code above. 
return; } // If we get to this point, nameComponents.length must be >=2 int lastPosition = nameComponents.length - 1; cachedSimpleClassName = nameComponents[lastPosition]; cachedOuterClassString = ""; for (int index = 0; index < lastPosition; index++) { cachedOuterClassString += nameComponents[index]; if (isAllDigits(nameComponents[index])) { member = false; } if (index + 1 != lastPosition) { // TODO: might need more logic to handle // classes with separators of non-$ characters // (ie Foo#Bar) cachedOuterClassString += '$'; } } // TODO: these two blocks are the same as blocks // above. Can we eliminate some by reworking the logic? if (!predictSimple) { cachedSimpleClassName = N; cachedSimpleClassNameIndex = nIndex; } if (!predictOuter) { cachedOuterClassString = C2; cachedOuterClassIndex = c2Index; } if (isAllDigits(cachedSimpleClassName)) { anonymous = true; member = false; if (nestedExplicitFlagSet()) { // Predicted class - marking as member member = true; } } outerIsAnonymous = computeOuterIsAnonymous(); } private boolean isAllDigits(String nameString) { // Answer true if the receiver is all digits; otherwise answer false. 
if (null == nameString) { return false; } for (int index = 0; index < nameString.length(); index++) { if (!Character.isDigit(nameString.charAt(index))) { return false; } } return true; } public String toString() { StringBuffer result = new StringBuffer(); result.append("IcTuple "); result.append('('); result.append(simpleClassName()); result.append(" in "); result.append(outerClassString()); result.append(')'); return result.toString(); } public boolean nullSafeEquals(String stringOne, String stringTwo) { if (null == stringOne) { return null == stringTwo; } return stringOne.equals(stringTwo); } public boolean equals(Object object) { if ((object == null) || (object.getClass() != this.getClass())) { return false; } IcTuple compareTuple = (IcTuple) object; if (!nullSafeEquals(this.C, compareTuple.C)) { return false; } if (!nullSafeEquals(this.C2, compareTuple.C2)) { return false; } if (!nullSafeEquals(this.N, compareTuple.N)) { return false; } return true; } private boolean hashcodeComputed; private int cachedHashCode; private void generateHashCode() { hashcodeComputed = true; cachedHashCode = 17; if(C != null) { cachedHashCode =+ C.hashCode(); } if(C2 != null) { cachedHashCode =+ C2.hashCode(); } if(N != null) { cachedHashCode =+ N.hashCode(); } } public int hashCode() { if (!hashcodeComputed) generateHashCode(); return cachedHashCode; } public String getC() { return C; } public int getF() { return F; } public String getC2() { return C2; } public String getN() { return N; } public int getTupleIndex() { return tIndex; } public int thisClassIndex() { if(predicted()) { return cIndex; } else { return -1; } } public int outerClassIndex() { return cachedOuterClassIndex; } public int simpleClassNameIndex() { return cachedSimpleClassNameIndex; } }
freeVM/freeVM
enhanced/java/classlib/modules/pack200/src/main/java/org/apache/harmony/unpack200/IcTuple.java
Java
apache-2.0
10,575
import React from 'react'; /** dkjnslknkjfndn */ const CloseButton = ({ YouCanPassAnyProps, closeToast }) => ( <button ariaLabel="close" className="Toastify__close-button" onClick={closeToast}> <span></span> </button> ); /** huyhuyghu */ const notificationStyle = { position: 'bottom-right', closeButton: <CloseButton YouCanPassAnyProps="foo" />, }; export default notificationStyle;
wfp/ui
src/components/Notification/Notification.legacy.js
JavaScript
apache-2.0
411
/* * Licensed to ElasticSearch and Shay Banon under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. ElasticSearch licenses this * file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.fielddata.fieldcomparator; import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.fielddata.BytesValues; import org.elasticsearch.index.fielddata.IndexFieldData; import java.io.IOException; /** * Sorts by field's natural Term sort order. All * comparisons are done using BytesRef.compareTo, which is * slow for medium to large result sets but possibly * very fast for very small results sets. 
*/ public final class BytesRefValComparator extends NestedWrappableComparator<BytesRef> { private final IndexFieldData<?> indexFieldData; private final SortMode sortMode; private final BytesRef missingValue; private final BytesRef[] values; private BytesRef bottom; private BytesValues docTerms; BytesRefValComparator(IndexFieldData<?> indexFieldData, int numHits, SortMode sortMode, BytesRef missingValue) { this.sortMode = sortMode; values = new BytesRef[numHits]; this.indexFieldData = indexFieldData; this.missingValue = missingValue; } @Override public int compare(int slot1, int slot2) { final BytesRef val1 = values[slot1]; final BytesRef val2 = values[slot2]; return compareValues(val1, val2); } @Override public int compareBottom(int doc) throws IOException { BytesRef val2 = sortMode.getRelevantValue(docTerms, doc, missingValue); return compareValues(bottom, val2); } @Override public void copy(int slot, int doc) throws IOException { BytesRef relevantValue = sortMode.getRelevantValue(docTerms, doc, missingValue); if (relevantValue == missingValue) { values[slot] = missingValue; } else { if (values[slot] == null || values[slot] == missingValue) { values[slot] = new BytesRef(); } values[slot].copyBytes(relevantValue); } } @Override public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException { docTerms = indexFieldData.load(context).getBytesValues(false); return this; } @Override public void setBottom(final int bottom) { this.bottom = values[bottom]; } @Override public BytesRef value(int slot) { return values[slot]; } @Override public int compareValues(BytesRef val1, BytesRef val2) { if (val1 == null) { if (val2 == null) { return 0; } return -1; } else if (val2 == null) { return 1; } return val1.compareTo(val2); } @Override public int compareDocToValue(int doc, BytesRef value) { return sortMode.getRelevantValue(docTerms, doc, missingValue).compareTo(value); } @Override public void missing(int slot) { values[slot] = missingValue; } 
@Override public int compareBottomMissing() { return compareValues(bottom, missingValue); } }
kimchy/elasticsearch
src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefValComparator.java
Java
apache-2.0
3,934
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/sagemaker/model/SendPipelineExecutionStepFailureResult.h> #include <aws/core/utils/json/JsonSerializer.h> #include <aws/core/AmazonWebServiceResult.h> #include <aws/core/utils/StringUtils.h> #include <aws/core/utils/UnreferencedParam.h> #include <utility> using namespace Aws::SageMaker::Model; using namespace Aws::Utils::Json; using namespace Aws::Utils; using namespace Aws; SendPipelineExecutionStepFailureResult::SendPipelineExecutionStepFailureResult() { } SendPipelineExecutionStepFailureResult::SendPipelineExecutionStepFailureResult(const Aws::AmazonWebServiceResult<JsonValue>& result) { *this = result; } SendPipelineExecutionStepFailureResult& SendPipelineExecutionStepFailureResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result) { JsonView jsonValue = result.GetPayload().View(); if(jsonValue.ValueExists("PipelineExecutionArn")) { m_pipelineExecutionArn = jsonValue.GetString("PipelineExecutionArn"); } return *this; }
awslabs/aws-sdk-cpp
aws-cpp-sdk-sagemaker/source/model/SendPipelineExecutionStepFailureResult.cpp
C++
apache-2.0
1,113
package io.cattle.platform.api.auth.impl; import io.cattle.platform.api.auth.Identity; import io.cattle.platform.api.auth.Policy; import io.github.ibuildthecloud.gdapi.context.ApiContext; import io.github.ibuildthecloud.gdapi.request.ApiRequest; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; public class DefaultPolicy implements Policy { long accountId; long authenticatedAsAccountId; String name; Set<Identity> identities; PolicyOptions options; @SuppressWarnings("unchecked") public DefaultPolicy() { this(Policy.NO_ACCOUNT, Policy.NO_ACCOUNT, null, Collections.EMPTY_SET, new NoPolicyOptions()); } public DefaultPolicy(long accountId, long authenticatedAsAccountId, String name, Set<Identity> identities, PolicyOptions options) { super(); this.accountId = accountId; this.authenticatedAsAccountId = authenticatedAsAccountId; this.identities = identities; this.options = options; this.name = name; } @Override public Set<Identity> getIdentities(){ return identities; } @Override public boolean isOption(String optionName) { return options.isOption(optionName); } @Override public String getOption(String optionName) { return options.getOption(optionName); } @Override public <T> List<T> authorizeList(List<T> list) { List<T> result = new ArrayList<T>(list.size()); for (T obj : list) { T authorized = authorizeObject(obj); if (authorized != null) result.add(authorized); } return result; } @Override public <T> T authorizeObject(T obj) { return obj; } @Override public long getAccountId() { return accountId; } @Override public long getAuthenticatedAsAccountId() { return authenticatedAsAccountId; } @Override public String getUserName() { return name; } @Override public <T> void grantObjectAccess(T obj) { ApiRequest apiRequest = ApiContext.getContext().getApiRequest(); @SuppressWarnings("unchecked") Set<Object> whitelist = (Set<Object>) (apiRequest.getAttribute("whitelist")); if (whitelist == null) { 
whitelist = new HashSet<>(); } whitelist.add(obj); apiRequest.setAttribute("whitelist", whitelist); } protected <T> boolean hasGrantedAccess(T obj) { ApiRequest request = ApiContext.getContext().getApiRequest(); @SuppressWarnings("unchecked") Set<Object> whitelist = (Set<Object>) request.getAttribute("whitelist"); return (null != whitelist && whitelist.contains(obj)); } @Override public Set<String> getRoles() { return Collections.emptySet(); } }
vincent99/cattle
code/framework/api/src/main/java/io/cattle/platform/api/auth/impl/DefaultPolicy.java
Java
apache-2.0
2,921
// // EndpointConfig.cs // This file is part of Stardust // // Author: Jonas Syrstad (jsyrstad2+StardustCore@gmail.com), http://no.linkedin.com/in/jonassyrstad/) // Copyright (c) 2014 Jonas Syrstad. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
//
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Configuration;
using System.Linq;

namespace Stardust.Interstellar.Config
{
    /// <summary>
    /// Configuration entry describing a named service and the set of endpoints it
    /// can be reached on, with the currently active endpoint selected by name.
    /// </summary>
    public class EndpointConfig : ICloneable
    {
        // NOTE(review): this flag is written through DoFormatOnRemoteAddress but
        // never read in this class — confirm whether external callers rely on it
        // before removing.
        private static bool DoFormat = true;

        /// <summary>Enables or disables remote-address formatting (flag currently unused in this class).</summary>
        public static void DoFormatOnRemoteAddress(bool doFormat)
        {
            DoFormat = doFormat;
        }

        public long Id { get; set; }

        public string ServiceName { get; set; }

        /// <summary>Name of the endpoint in <see cref="Endpoints"/> that is currently active.</summary>
        public string ActiveEndpoint { get; set; }

        public virtual List<Endpoint> Endpoints { get; set; }

        [Obsolete("hide from swagger", false)]
        public virtual ConfigurationSet Set { get; set; }

        public bool Deleted { get; set; }

        /// <summary>Returns a shallow copy of this instance.</summary>
        public object Clone()
        {
            return MemberwiseClone();
        }

        /// <summary>
        /// Resolves the address of the active endpoint. When the address points at a
        /// WCF .svc endpoint and the "stardust.AppendBindingType" app setting is
        /// "true", the endpoint (binding) name is appended as an extra path segment.
        /// </summary>
        /// <param name="root">Unused; kept for backward compatibility with existing callers.</param>
        internal string GetRemoteAddress(string root = null)
        {
            var active = GetEndpoint(ActiveEndpoint);
            var address = active.Address;
            if (address.EndsWith(".svc") && ConfigurationManager.AppSettings["stardust.AppendBindingType"] == "true")
            {
                // Append the binding/endpoint name so the correct binding is addressed.
                address = address + "/" + active.EndpointName;
            }
            // BUGFIX: previously returned the raw address, discarding the
            // binding-type segment computed by the branch above.
            return address;
        }

        private readonly ConcurrentDictionary<string, Endpoint> endpointCache = new ConcurrentDictionary<string, Endpoint>();

        /// <summary>
        /// Finds an endpoint by (case-insensitive) name. Lookups — including misses,
        /// cached as null — are memoized so repeated resolution does not rescan the list.
        /// </summary>
        public Endpoint GetEndpoint(string name)
        {
            var key = name.ToLower();
            Endpoint endpoint;
            if (endpointCache.TryGetValue(key, out endpoint)) return endpoint;
            endpoint = Endpoints.FirstOrDefault(e => string.Equals(e.EndpointName, name, StringComparison.OrdinalIgnoreCase));
            endpointCache.TryAdd(key, endpoint);
            return endpoint;
        }
    }
}
JonasSyrstad/Stardust.Nexus
Stardust.Nexus/Stardust.Nexus.Management.Client/EndpointConfig.cs
C#
apache-2.0
3,185
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.


def header(input_api):
  """Returns the expected license header regexp for this project."""
  this_year = int(input_api.time.strftime('%Y'))
  # Accept any copyright year from the current one back to 2011, newest first.
  years_re = '(' + '|'.join(
      str(year) for year in range(this_year, 2010, -1)) + ')'
  return (
      r'.*? Copyright %(year)s The LUCI Authors\. '
      r'All rights reserved\.\n'
      r'.*? Use of this source code is governed under the Apache License, '
      r'Version 2\.0\n'
      r'.*? that can be found in the LICENSE file\.(?: \*/)?\n'
  ) % {'year': years_re}


def CommonChecks(input_api, output_api):
  """Runs the standard pan-project presubmit checks with our license header."""
  return input_api.canned_checks.PanProjectChecks(
      input_api,
      output_api,
      license_header=header(input_api),
      excluded_paths=[
          r'.+_pb2\.py',
      ],
  )


def CheckChangeOnUpload(input_api, output_api):
  return CommonChecks(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  results = CommonChecks(input_api, output_api)

  # These commands mutate files on disk (vpython caches, generated recipe
  # protos), and are otherwise invoked implicitly by the other tests. The
  # vpython check is nominally guarded by a file lock, but updating recipes.py's
  # protos is not — so run them serially, up front.
  recipes_py = input_api.os_path.join(
      input_api.PresubmitLocalPath(), 'recipes.py')
  serial_commands = (
      input_api.canned_checks.CheckVPythonSpec(input_api, output_api) + [
          input_api.Command(
              'Compile recipe protos',
              ['python', recipes_py, 'fetch'],
              {},
              output_api.PresubmitError,
          ),
      ])
  for command in serial_commands:
    outcome = input_api.thread_pool.CallCommand(command)
    if outcome:
      results.append(outcome)

  # run_test.py depends on the host's wall clock and gets flaky when the host
  # is busy, so run every other unit test in parallel first, then run_test.py
  # by itself.
  results.extend(input_api.RunTests(
      input_api.canned_checks.GetUnitTestsInDirectory(
          input_api,
          output_api,
          'unittests',
          files_to_check=[r'.+_test\.py'],
          files_to_skip=[r'run_test\.py'],
      )))
  results.extend(input_api.RunTests(
      input_api.canned_checks.GetUnitTestsInDirectory(
          input_api,
          output_api,
          'unittests',
          files_to_check=[r'run_test\.py'],
      )))
  return results
luci/recipes-py
PRESUBMIT.py
Python
apache-2.0
2,563
<%#
 Copyright 2013-2017 the original author or authors.

 This file is part of the JHipster project, see https://jhipster.github.io/
 for more information.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-%>
(function() {
    'use strict';

    // Admin "Configuration" screen controller: loads the application's
    // configuration and full environment dump via the configuration service.
    angular
        .module('<%=angularAppName%>')
        .controller('<%=jhiPrefixCapitalized%>ConfigurationController', <%=jhiPrefixCapitalized%>ConfigurationController);

    // Explicit $inject keeps dependency injection working after minification.
    // NOTE(review): '$filter' is injected (bound to the 'filter' parameter) but
    // never used in this controller body — confirm and consider removing it.
    <%=jhiPrefixCapitalized%>ConfigurationController.$inject = ['$filter','<%=jhiPrefixCapitalized%>ConfigurationService'];

    function <%=jhiPrefixCapitalized%>ConfigurationController (filter,<%=jhiPrefixCapitalized%>ConfigurationService) {
        var vm = this;

        // Both view-model fields start as null until the service promises resolve.
        vm.allConfiguration = null;
        vm.configuration = null;

        <%=jhiPrefixCapitalized%>ConfigurationService.get().then(function(configuration) {
            vm.configuration = configuration;
        });
        <%=jhiPrefixCapitalized%>ConfigurationService.getEnv().then(function (configuration) {
            vm.allConfiguration = configuration;
        });
    }
})();
fjuriolli/scribble
node_modules/generator-jhipster/generators/client/templates/angularjs/src/main/webapp/app/admin/configuration/_configuration.controller.js
JavaScript
apache-2.0
1,559
// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"context"
	"fmt"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
	"github.com/Azure/go-autorest/autorest/to"
	v1 "k8s.io/api/core/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/sets"
	cloudprovider "k8s.io/cloud-provider"
	servicehelpers "k8s.io/cloud-provider/service/helpers"
	"k8s.io/klog/v2"
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
	"k8s.io/legacy-cloud-providers/azure/metrics"
	"k8s.io/legacy-cloud-providers/azure/retry"
	utilnet "k8s.io/utils/net"
)

// Service annotations understood by this provider. Each constant below is the
// annotation key a user puts on a v1.Service to influence Azure LB behavior.
const (
	// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
	// to request an internal (private) load balancer instead of a public one.
	ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal"

	// ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service
	// to specify what subnet it is exposed on
	ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet"

	// ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the
	// Azure load balancer selection based on availability sets
	// There are currently three possible load balancer selection modes :
	// 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode")
	//    In this case the Loadbalancer of the primary Availability set is selected
	// 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set
	//    is selected which has the minimum rules associated with it.
	// 3. "as1,as2" mode - this is when the load balancer from the specified availability sets is selected that has the
	//    minimum rules associated with it.
	ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode"

	// ServiceAnnotationLoadBalancerAutoModeValue is the annotation used on the service to specify the
	// Azure load balancer auto selection from the availability sets
	ServiceAnnotationLoadBalancerAutoModeValue = "__auto__"

	// ServiceAnnotationDNSLabelName is the annotation used on the service
	// to specify the DNS label name for the service.
	ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name"

	// ServiceAnnotationSharedSecurityRule is the annotation used on the service
	// to specify that the service should be exposed using an Azure security rule
	// that may be shared with other service, trading specificity of rules for an
	// increase in the number of services that can be exposed. This relies on the
	// Azure "augmented security rules" feature.
	ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule"

	// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service
	// to specify the resource group of load balancer objects that are not in the same resource group as the cluster.
	ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group"

	// ServiceAnnotationPIPName specifies the pip that will be applied to load balancer
	ServiceAnnotationPIPName = "service.beta.kubernetes.io/azure-pip-name"

	// ServiceAnnotationIPTagsForPublicIP specifies the iptags used when dynamically creating a public ip
	ServiceAnnotationIPTagsForPublicIP = "service.beta.kubernetes.io/azure-pip-ip-tags"

	// ServiceAnnotationAllowedServiceTag is the annotation used on the service
	// to specify a list of allowed service tags separated by comma
	// Refer https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#service-tags for all supported service tags.
	ServiceAnnotationAllowedServiceTag = "service.beta.kubernetes.io/azure-allowed-service-tags"

	// ServiceAnnotationLoadBalancerIdleTimeout is the annotation used on the service
	// to specify the idle timeout for connections on the load balancer in minutes.
	ServiceAnnotationLoadBalancerIdleTimeout = "service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout"

	// ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts is the annotation used on the service
	// to enable the high availability ports on the standard internal load balancer.
	ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts = "service.beta.kubernetes.io/azure-load-balancer-enable-high-availability-ports"

	// ServiceAnnotationLoadBalancerDisableTCPReset is the annotation used on the service
	// to set enableTcpReset to false in load balancer rule. This only works for Azure standard load balancer backed service.
	// TODO(feiskyer): disable-tcp-reset annotations has been deprecated since v1.18, it would be removed on v1.20.
	ServiceAnnotationLoadBalancerDisableTCPReset = "service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset"

	// ServiceAnnotationLoadBalancerHealthProbeProtocol determines the network protocol that the load balancer health probe use.
	// If not set, the local service would use the HTTP and the cluster service would use the TCP by default.
	ServiceAnnotationLoadBalancerHealthProbeProtocol = "service.beta.kubernetes.io/azure-load-balancer-health-probe-protocol"

	// ServiceAnnotationLoadBalancerHealthProbeRequestPath determines the request path of the load balancer health probe.
	// This is only useful for the HTTP and HTTPS, and would be ignored when using TCP. If not set,
	// `/healthz` would be configured by default.
	ServiceAnnotationLoadBalancerHealthProbeRequestPath = "service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path"

	// ServiceAnnotationAzurePIPTags determines what tags should be applied to the public IP of the service. The cluster name
	// and service names tags (which is managed by controller manager itself) would keep unchanged. The supported format
	// is `a=b,c=d,...`. After updated, the old user-assigned tags would not be replaced by the new ones.
	ServiceAnnotationAzurePIPTags = "service.beta.kubernetes.io/azure-pip-tags"

	// serviceTagKey is the service key applied for public IP tags.
	serviceTagKey = "service"

	// clusterNameKey is the cluster name key applied for public IP tags.
	clusterNameKey = "kubernetes-cluster-name"

	// serviceUsingDNSKey is the service name consuming the DNS label on the public IP
	serviceUsingDNSKey = "kubernetes-dns-label-service"

	// defaultLoadBalancerSourceRanges allows traffic from anywhere when the
	// service specifies no source ranges.
	defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)

// GetLoadBalancer returns whether the specified load balancer and its components exist, and
// if so, what its status is.
func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
	// Since public IP is not a part of the load balancer on Azure,
	// there is a chance that we could orphan public IP resources while we delete the load balancer (kubernetes/kubernetes#80571).
	// We need to make sure the existence of the load balancer depends on the load balancer resource and public IP resource on Azure.
	existsPip := func() bool {
		pipName, _, err := az.determinePublicIPName(clusterName, service)
		if err != nil {
			return false
		}
		pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
		_, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
		if err != nil {
			return false
		}
		return existsPip
	}()

	_, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false)
	if err != nil {
		return nil, existsPip, err
	}

	// Return exists = false only if the load balancer and the public IP are not found on Azure
	if !existsLb && !existsPip {
		serviceName := getServiceName(service)
		klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
		return nil, false, nil
	}

	// Return exists = true if either the load balancer or the public IP (or both) exists
	return status, true, nil
}

// getPublicIPDomainNameLabel returns the value of the DNS-label annotation on
// the service and whether the annotation was present.
func getPublicIPDomainNameLabel(service *v1.Service) (string, bool) {
	if labelName, found := service.Annotations[ServiceAnnotationDNSLabelName]; found {
		return labelName, found
	}
	return "", false
}

// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	// When a client updates the internal load balancer annotation,
	// the service may be switched from an internal LB to a public one, or vice versa.
	// Here we'll firstly ensure service do not lie in the opposite LB.
	serviceName := getServiceName(service)
	klog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName)

	mc := metrics.NewMetricContext("services", "ensure_loadbalancer", az.ResourceGroup, az.SubscriptionID, serviceName)
	isOperationSucceeded := false
	// Record success/failure of the whole operation for metrics on exit.
	defer func() {
		mc.ObserveOperationWithResult(isOperationSucceeded)
	}()

	lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
	if err != nil {
		klog.Errorf("reconcileLoadBalancer(%s) failed: %v", serviceName, err)
		return nil, err
	}

	lbStatus, err := az.getServiceLoadBalancerStatus(service, lb)
	if err != nil {
		klog.Errorf("getServiceLoadBalancerStatus(%s) failed: %v", serviceName, err)
		return nil, err
	}

	var serviceIP *string
	if lbStatus != nil && len(lbStatus.Ingress) > 0 {
		serviceIP = &lbStatus.Ingress[0].IP
	}

	klog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
	if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
		klog.Errorf("reconcileSecurityGroup(%s) failed: %#v", serviceName, err)
		return nil, err
	}

	// Reconcile the "flipped" (internal<->public) variant with wantLb=false so the
	// service is removed from the opposite-type LB if the annotation changed.
	updateService := updateServiceLoadBalancerIP(service, to.String(serviceIP))
	flippedService := flipServiceInternalAnnotation(updateService)
	if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil {
		klog.Errorf("reconcileLoadBalancer(%s) failed: %#v", serviceName, err)
		return nil, err
	}

	// lb is not reused here because the ETAG may be changed in above operations, hence reconcilePublicIP() would get lb again from cache.
	klog.V(2).Infof("EnsureLoadBalancer: reconciling pip")
	if _, err := az.reconcilePublicIP(clusterName, updateService, to.String(lb.Name), true /* wantLb */); err != nil {
		klog.Errorf("reconcilePublicIP(%s) failed: %#v", serviceName, err)
		return nil, err
	}

	isOperationSucceeded = true
	return lbStatus, nil
}

// UpdateLoadBalancer updates hosts under the specified load balancer.
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
	if !az.shouldUpdateLoadBalancer(clusterName, service) {
		klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because it is either being deleted or does not exist anymore", service.Name)
		return nil
	}
	// EnsureLoadBalancer performs the full reconcile, so update is a delegate.
	_, err := az.EnsureLoadBalancer(ctx, clusterName, service, nodes)
	return err
}

// EnsureLoadBalancerDeleted deletes the specified load balancer if it
// exists, returning nil if the load balancer specified either didn't exist or
// was successfully deleted.
// This construction is useful because many cloud providers' load balancers
// have multiple underlying components, meaning a Get could say that the LB
// doesn't exist even if some part of it is still laying around.
func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	klog.V(5).Infof("Delete service (%s): START clusterName=%q", serviceName, clusterName)

	mc := metrics.NewMetricContext("services", "ensure_loadbalancer_deleted", az.ResourceGroup, az.SubscriptionID, serviceName)
	isOperationSucceeded := false
	// Record success/failure of the whole operation for metrics on exit.
	defer func() {
		mc.ObserveOperationWithResult(isOperationSucceeded)
	}()

	// Forbidden/ignorable errors are tolerated here so deletion can proceed
	// even when the IP lookup is not permitted.
	serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
	if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
		return err
	}

	klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
	if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil {
		return err
	}

	if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
		return err
	}

	if _, err := az.reconcilePublicIP(clusterName, service, "", false /* wantLb */); err != nil {
		return err
	}

	klog.V(2).Infof("Delete service (%s): FINISH", serviceName)

	isOperationSucceeded = true

	return nil
}

// GetLoadBalancerName returns the LoadBalancer name.
func (az *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
	return cloudprovider.DefaultLoadBalancerName(service)
}

// getLoadBalancerResourceGroup returns the configured load-balancer resource
// group, falling back to the cluster resource group when none is set.
func (az *Cloud) getLoadBalancerResourceGroup() string {
	if az.LoadBalancerResourceGroup != "" {
		return az.LoadBalancerResourceGroup
	}

	return az.ResourceGroup
}

// cleanBackendpoolForPrimarySLB decouples the unwanted nodes from the standard load balancer.
// This is needed because when migrating from single SLB to multiple SLBs, The existing
// SLB's backend pool contains nodes from different agent pools, while we only want the
// nodes from the primary agent pool to join the backend pool.
func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer, service *v1.Service, clusterName string) (*network.LoadBalancer, error) {
	lbBackendPoolName := getBackendPoolName(clusterName, service)
	lbResourceGroup := az.getLoadBalancerResourceGroup()
	lbBackendPoolID := az.getBackendPoolID(to.String(primarySLB.Name), lbResourceGroup, lbBackendPoolName)
	newBackendPools := make([]network.BackendAddressPool, 0)
	if primarySLB.LoadBalancerPropertiesFormat != nil && primarySLB.BackendAddressPools != nil {
		newBackendPools = *primarySLB.BackendAddressPools
	}
	// Map of vmSet name -> IP configurations that must be decoupled from the LB.
	vmSetNameToBackendIPConfigurationsToBeDeleted := make(map[string][]network.InterfaceIPConfiguration)
	for j, bp := range newBackendPools {
		if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) {
			klog.V(2).Infof("cleanBackendpoolForPrimarySLB: checking the backend pool %s from standard load balancer %s", to.String(bp.Name), to.String(primarySLB.Name))
			if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil {
				// Iterate backwards so in-place removals do not disturb the
				// indexes of elements not yet visited.
				for i := len(*bp.BackendIPConfigurations) - 1; i >= 0; i-- {
					ipConf := (*bp.BackendIPConfigurations)[i]
					ipConfigID := to.String(ipConf.ID)
					_, vmSetName, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfigID)
					if err != nil {
						return nil, err
					}
					primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
					if !strings.EqualFold(primaryVMSetName, vmSetName) {
						klog.V(2).Infof("cleanBackendpoolForPrimarySLB: found unwanted vmSet %s, decouple it from the LB", vmSetName)
						// construct a backendPool that only contains the IP config of the node to be deleted
						interfaceIPConfigToBeDeleted := network.InterfaceIPConfiguration{
							ID: to.StringPtr(ipConfigID),
						}
						vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName] = append(vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName], interfaceIPConfigToBeDeleted)
						*bp.BackendIPConfigurations = append((*bp.BackendIPConfigurations)[:i], (*bp.BackendIPConfigurations)[i+1:]...)
					}
				}
			}
			newBackendPools[j] = bp
			break
		}
	}
	for vmSetName, backendIPConfigurationsToBeDeleted := range vmSetNameToBackendIPConfigurationsToBeDeleted {
		backendpoolToBeDeleted := &[]network.BackendAddressPool{
			{
				ID: to.StringPtr(lbBackendPoolID),
				BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
					BackendIPConfigurations: &backendIPConfigurationsToBeDeleted,
				},
			},
		}
		// decouple the backendPool from the node
		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
		if err != nil {
			return nil, err
		}
		// NOTE(review): this assignment sits inside the loop, so it repeats per
		// vmSet and is skipped entirely when nothing needed deletion — confirm
		// whether it was meant to run unconditionally after the loop.
		primarySLB.BackendAddressPools = &newBackendPools
	}
	return primarySLB, nil
}

// getServiceLoadBalancer gets the loadbalancer for the service if it already exists.
// If wantLb is TRUE then it selects a new load balancer.
// In case the selected load balancer does not exist it returns network.LoadBalancer struct
// with added metadata (such as name, location) and existsLB set to FALSE.
// By default - cluster default LB is returned.
// getServiceLoadBalancer returns:
//   - the load balancer currently hosting the service, with its status and exists=true; or
//   - when wantLb is set (and multiple SLBs are possible), a newly selected LB; or
//   - the cluster/primary-vmSet default LB (constructed in memory with exists=false
//     if it does not exist yet).
func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) {
	isInternal := requiresInternalLoadBalancer(service)
	var defaultLB *network.LoadBalancer
	primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
	defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)
	useMultipleSLBs := az.useStandardLoadBalancer() && az.EnableMultipleStandardLoadBalancers

	existingLBs, err := az.ListLB(service)
	if err != nil {
		return nil, nil, false, err
	}

	// check if the service already has a load balancer
	for i := range existingLBs {
		existingLB := existingLBs[i]

		// When migrating to multiple SLBs, strip non-primary-vmSet nodes out of
		// the cluster-named SLB's backend pool before considering it.
		if strings.EqualFold(to.String(existingLB.Name), clusterName) && useMultipleSLBs {
			cleanedLB, err := az.cleanBackendpoolForPrimarySLB(&existingLB, service, clusterName)
			if err != nil {
				return nil, nil, false, err
			}
			existingLB = *cleanedLB
		}
		if strings.EqualFold(*existingLB.Name, defaultLBName) {
			defaultLB = &existingLB
		}
		// Internal/external LBs are distinct resources; skip the wrong flavor.
		if isInternalLoadBalancer(&existingLB) != isInternal {
			continue
		}
		status, err = az.getServiceLoadBalancerStatus(service, &existingLB)
		if err != nil {
			return nil, nil, false, err
		}
		if status == nil {
			// service is not on this load balancer
			continue
		}

		return &existingLB, status, true, nil
	}

	hasMode, _, _ := getServiceLoadBalancerMode(service)
	useSingleSLB := az.useStandardLoadBalancer() && !az.EnableMultipleStandardLoadBalancers
	if useSingleSLB && hasMode {
		klog.Warningf("single standard load balancer doesn't work with annotation %q, would ignore it", ServiceAnnotationLoadBalancerMode)
	}

	// Service does not have a load balancer, select one.
	// Single standard load balancer doesn't need this because
	// all backends nodes should be added to same LB.
	if wantLb && !useSingleSLB {
		// select new load balancer for service
		selectedLB, exists, err := az.selectLoadBalancer(clusterName, service, &existingLBs, nodes)
		if err != nil {
			return nil, nil, false, err
		}

		return selectedLB, nil, exists, err
	}

	// create a default LB with meta data if not present
	if defaultLB == nil {
		defaultLB = &network.LoadBalancer{
			Name:                         &defaultLBName,
			Location:                     &az.Location,
			LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
		}
		if az.useStandardLoadBalancer() {
			defaultLB.Sku = &network.LoadBalancerSku{
				Name: network.LoadBalancerSkuNameStandard,
			}
		}
	}

	return defaultLB, nil, false, nil
}

// selectLoadBalancer selects load balancer for the service in the cluster.
// The selection algorithm selects the load balancer which currently has
// the minimum lb rules. If there are multiple LBs with same number of rules,
// then selects the first one (sorted based on name).
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
	vmSetNames, err := az.VMSet.GetVMSetNames(service, nodes)
	if err != nil {
		klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
		return nil, false, err
	}
	klog.V(2).Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)

	// Index the known LBs by name for O(1) lookup per candidate vmSet.
	mapExistingLBs := map[string]network.LoadBalancer{}
	for _, lb := range *existingLBs {
		mapExistingLBs[*lb.Name] = lb
	}
	selectedLBRuleCount := math.MaxInt32
	for _, currASName := range *vmSetNames {
		currLBName := az.getAzureLoadBalancerName(clusterName, currASName, isInternal)
		lb, exists :=
mapExistingLBs[currLBName]
		if !exists {
			// select this LB as this is a new LB and will have minimum rules
			// create tmp lb struct to hold metadata for the new load-balancer
			var loadBalancerSKU network.LoadBalancerSkuName
			if az.useStandardLoadBalancer() {
				loadBalancerSKU = network.LoadBalancerSkuNameStandard
			} else {
				loadBalancerSKU = network.LoadBalancerSkuNameBasic
			}
			selectedLB = &network.LoadBalancer{
				Name:                         &currLBName,
				Location:                     &az.Location,
				Sku:                          &network.LoadBalancerSku{Name: loadBalancerSKU},
				LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
			}

			return selectedLB, false, nil
		}

		// FIX: LoadBalancingRules is a *[]network.LoadBalancingRule and may be
		// nil; the previous code dereferenced it (lbRules := *lb.LoadBalancingRules)
		// before the nil check, which would panic. Check the pointer first.
		currLBRuleCount := 0
		if lb.LoadBalancingRules != nil {
			currLBRuleCount = len(*lb.LoadBalancingRules)
		}
		// Keep the LB with the fewest rules; ties resolve to the first seen.
		if currLBRuleCount < selectedLBRuleCount {
			selectedLBRuleCount = currLBRuleCount
			selectedLB = &lb
		}
	}

	if selectedLB == nil {
		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames)
		klog.Error(err)
		return nil, false, err
	}
	// validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount
	if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount {
		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames)
		klog.Error(err)
		return selectedLB, existsLb, err
	}

	return selectedLB, existsLb, nil
}

// getServiceLoadBalancerStatus returns the ingress status (the frontend IP)
// of the given service on the given load balancer, or (nil, nil) when the
// service owns no frontend IP configuration on that LB.
func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
	if lb == nil {
		klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
		return nil, nil
	}
	if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
		klog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
		return nil, nil
	}
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	for _, ipConfiguration := range *lb.FrontendIPConfigurations {
		owns, isPrimaryService, err := az.serviceOwnsFrontendIP(ipConfiguration, service)
		if err != nil {
			return nil, fmt.Errorf("get(%s): lb(%s) - failed to filter frontend IP configs with error: %v", serviceName, to.String(lb.Name), err)
		}
		if owns {
			klog.V(2).Infof("get(%s): lb(%s) - found frontend IP config, primary service: %v", serviceName, to.String(lb.Name), isPrimaryService)

			var lbIP *string
			if isInternal {
				// Internal LBs expose the private frontend address directly.
				lbIP = ipConfiguration.PrivateIPAddress
			} else {
				// External LBs reference a PublicIPAddress; resolve it to get the IP.
				if ipConfiguration.PublicIPAddress == nil {
					return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress is Nil", serviceName, *lb.Name)
				}
				pipID := ipConfiguration.PublicIPAddress.ID
				if pipID == nil {
					return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress ID is Nil", serviceName, *lb.Name)
				}
				pipName, err := getLastSegment(*pipID, "/")
				if err != nil {
					return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID)
				}
				pip, existsPip, err := az.getPublicIPAddress(az.getPublicIPAddressResourceGroup(service), pipName)
				if err != nil {
					return nil, err
				}
				if existsPip {
					lbIP = pip.IPAddress
				}
			}

			klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), to.String(ipConfiguration.Name), serviceName)
			return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: to.String(lbIP)}}}, nil
		}
	}

	return nil, nil
}

// determinePublicIPName returns the name of the public IP to use for this
// service, and whether that public IP is required to already exist (true when
// the name comes from the pip-name annotation or a user-supplied loadBalancerIP).
func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) (string, bool, error) {
	var shouldPIPExisted bool

	if name, found := service.Annotations[ServiceAnnotationPIPName]; found && name != "" {
		shouldPIPExisted = true
		return name, shouldPIPExisted, nil
	}

	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
	loadBalancerIP := service.Spec.LoadBalancerIP

	// Assume that the service without loadBalancerIP set is a primary service.
	// If a secondary service doesn't set the loadBalancerIP, it is not allowed to share the IP.
	if len(loadBalancerIP) == 0 {
		return az.getPublicIPName(clusterName, service), shouldPIPExisted, nil
	}

	// For the services with loadBalancerIP set, an existing public IP is required, primary
	// or secondary, or a public IP not found error would be reported.
	pip, err := az.findMatchedPIPByLoadBalancerIP(service, loadBalancerIP, pipResourceGroup)
	if err != nil {
		return "", shouldPIPExisted, err
	}
	if pip != nil && pip.Name != nil {
		return *pip.Name, shouldPIPExisted, nil
	}
	return "", shouldPIPExisted, fmt.Errorf("user supplied IP Address %s was not found in resource group %s", loadBalancerIP, pipResourceGroup)
}

// findMatchedPIPByLoadBalancerIP scans all public IPs in pipResourceGroup and
// returns the one whose IP address equals loadBalancerIP, or an error when no
// match exists.
func (az *Cloud) findMatchedPIPByLoadBalancerIP(service *v1.Service, loadBalancerIP, pipResourceGroup string) (*network.PublicIPAddress, error) {
	pips, err := az.ListPIP(service, pipResourceGroup)
	if err != nil {
		return nil, err
	}
	for _, pip := range pips {
		if pip.PublicIPAddressPropertiesFormat.IPAddress != nil &&
			*pip.PublicIPAddressPropertiesFormat.IPAddress == loadBalancerIP {
			return &pip, nil
		}
	}
	return nil, fmt.Errorf("findMatchedPIPByLoadBalancerIP: cannot find public IP with IP address %s in resource group %s", loadBalancerIP, pipResourceGroup)
}

// flipServiceInternalAnnotation returns a deep copy of the service with the
// internal-load-balancer annotation toggled (removed if present and "true",
// set to "true" otherwise). The input service is not mutated.
func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
	copyService := service.DeepCopy()
	if copyService.Annotations == nil {
		copyService.Annotations = map[string]string{}
	}
	if v, ok := copyService.Annotations[ServiceAnnotationLoadBalancerInternal]; ok && v == "true" {
		// If it is internal now, we make it external by remove the annotation
		delete(copyService.Annotations, ServiceAnnotationLoadBalancerInternal)
	} else {
		// If it is external now, we make it internal
		copyService.Annotations[ServiceAnnotationLoadBalancerInternal] = "true"
	}
	return copyService
}

// updateServiceLoadBalancerIP returns a deep copy of the service with
// Spec.LoadBalancerIP set to serviceIP (when serviceIP is non-empty).
// The input service is not mutated.
func updateServiceLoadBalancerIP(service *v1.Service, serviceIP string) *v1.Service {
	copyService := service.DeepCopy()
	if len(serviceIP) > 0 && copyService != nil {
		copyService.Spec.LoadBalancerIP = serviceIP
	}
	return copyService
}

// findServiceIPAddress looks up the service's ingress IP: first from
// Spec.LoadBalancerIP, then from the service status, and finally from the
// load balancer resource itself. An empty string with nil error means the
// LB (or its ingress) no longer exists.
func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
	if len(service.Spec.LoadBalancerIP) > 0 {
		return service.Spec.LoadBalancerIP, nil
	}

	if len(service.Status.LoadBalancer.Ingress) > 0 && len(service.Status.LoadBalancer.Ingress[0].IP) > 0 {
		return service.Status.LoadBalancer.Ingress[0].IP, nil
	}

	_, lbStatus, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false)
	if err != nil {
		return "", err
	}
	if !existsLb {
		klog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name)
		return "", nil
	}
	if len(lbStatus.Ingress) < 1 {
		klog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name)
		return "", nil
	}

	return lbStatus.Ingress[0].IP, nil
}

// ensurePublicIPExists creates or updates the public IP for the service and
// returns the up-to-date resource. It reconciles the service tag, the DNS
// label (when foundDNSLabelAnnotation is set), the IP family (IPv4/IPv6 based
// on the service's ClusterIP), and the SKU. When shouldPIPExisted is true and
// the PIP is missing, an error is returned instead of creating one.
func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel, clusterName string, shouldPIPExisted, foundDNSLabelAnnotation bool) (*network.PublicIPAddress, error) {
	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
	pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
	if err != nil {
		return nil, err
	}

	serviceName := getServiceName(service)
	if existsPip {
		// ensure that the service tag is good
		changed, err := bindServicesToPIP(&pip, []string{serviceName}, false)
		if err != nil {
			return nil, err
		}

		// return if pip exist and dns label is the same
		if strings.EqualFold(getDomainNameLabel(&pip), domainNameLabel) {
			if existingServiceName, ok := pip.Tags[serviceUsingDNSKey]; ok &&
				strings.EqualFold(*existingServiceName, serviceName) {
				klog.V(6).Infof("ensurePublicIPExists for service(%s): pip(%s) - "+
					"the service is using the DNS label on the public IP", serviceName, pipName)

				var rerr *retry.Error
				if changed {
					// The service-tag binding changed; persist it and re-read the PIP.
					klog.V(2).Infof("ensurePublicIPExists: updating the PIP %s for the incoming service %s", pipName, serviceName)
					err = az.CreateOrUpdatePIP(service, pipResourceGroup, pip)
					if err != nil {
						return nil, err
					}

					ctx, cancel := getContextWithCancel()
					defer cancel()
					pip, rerr = az.PublicIPAddressesClient.Get(ctx, pipResourceGroup, *pip.Name, "")
					if rerr != nil {
						return nil, rerr.Error()
					}
				}

				return &pip, nil
			}
		}

		klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - updating", serviceName, *pip.Name)
		if pip.PublicIPAddressPropertiesFormat == nil {
			pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{
				PublicIPAllocationMethod: network.Static,
			}
		}
	} else {
		if shouldPIPExisted {
			return nil, fmt.Errorf("PublicIP from annotation azure-pip-name=%s for service %s doesn't exist", pipName, serviceName)
		}

		// Build a brand-new PIP with static allocation and the service tags.
		pip.Name = to.StringPtr(pipName)
		pip.Location = to.StringPtr(az.Location)
		pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{
			PublicIPAllocationMethod: network.Static,
			IPTags:                   getServiceIPTagRequestForPublicIP(service).IPTags,
		}
		pip.Tags = map[string]*string{
			serviceTagKey:  to.StringPtr(""),
			clusterNameKey: &clusterName,
		}
		if _, err = bindServicesToPIP(&pip, []string{serviceName}, false); err != nil {
			return nil, err
		}
		if az.useStandardLoadBalancer() {
			pip.Sku = &network.PublicIPAddressSku{
				Name: network.PublicIPAddressSkuNameStandard,
			}
		}
		klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
	}
	if foundDNSLabelAnnotation {
		// Only one service may own the DNS label on a shared public IP.
		if existingServiceName, ok := pip.Tags[serviceUsingDNSKey]; ok {
			if !strings.EqualFold(to.String(existingServiceName), serviceName) {
				return nil, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing service %s consuming the DNS label on the public IP, so the service cannot set the DNS label annotation with this value", serviceName, pipName, *existingServiceName)
			}
		}

		if len(domainNameLabel) == 0 {
			pip.PublicIPAddressPropertiesFormat.DNSSettings = nil
		} else {
			if pip.PublicIPAddressPropertiesFormat.DNSSettings == nil ||
				pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel == nil {
				klog.V(6).Infof("ensurePublicIPExists for service(%s): pip(%s) - no existing DNS label on the public IP, create one", serviceName, pipName)
				pip.PublicIPAddressPropertiesFormat.DNSSettings = &network.PublicIPAddressDNSSettings{
					DomainNameLabel: &domainNameLabel,
				}
			} else {
				existingDNSLabel := pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel
				if !strings.EqualFold(to.String(existingDNSLabel), domainNameLabel) {
					return nil, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing DNS label %s on the public IP", serviceName, pipName, *existingDNSLabel)
				}
			}
			pip.Tags[serviceUsingDNSKey] = &serviceName
		}
	}

	// use the same family as the clusterIP as we support IPv6 single stack as well
	// as dual-stack clusters
	ipv6 := utilnet.IsIPv6String(service.Spec.ClusterIP)
	if ipv6 {
		pip.PublicIPAddressVersion = network.IPv6
		klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)

		pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Dynamic
		if az.useStandardLoadBalancer() {
			// standard sku must have static allocation method for ipv6
			pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Static
		}
	} else {
		pip.PublicIPAddressVersion = network.IPv4
		klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
	}

	klog.V(2).Infof("CreateOrUpdatePIP(%s, %q): start", pipResourceGroup, *pip.Name)
	err = az.CreateOrUpdatePIP(service, pipResourceGroup, pip)
	if err != nil {
		klog.V(2).Infof("ensure(%s) abort backoff: pip(%s)", serviceName, *pip.Name)
		return nil, err
	}
	klog.V(10).Infof("CreateOrUpdatePIP(%s, %q): end", pipResourceGroup, *pip.Name)

	ctx, cancel := getContextWithCancel()
	defer cancel()
	pip, rerr := az.PublicIPAddressesClient.Get(ctx, pipResourceGroup, *pip.Name, "")
	if rerr != nil {
		return nil, rerr.Error()
	}
	return &pip, nil
}

// serviceIPTagRequest captures whether the service requested explicit IP tags
// via annotation, and the parsed tags themselves.
type serviceIPTagRequest struct {
	IPTagsRequestedByAnnotation bool // true when the ip-tags annotation is present
	IPTags                      *[]network.IPTag
}

// Get the ip tag Request for the public ip from service annotations.
func getServiceIPTagRequestForPublicIP(service *v1.Service) serviceIPTagRequest {
	if service != nil {
		if ipTagString, found := service.Annotations[ServiceAnnotationIPTagsForPublicIP]; found {
			return serviceIPTagRequest{
				IPTagsRequestedByAnnotation: true,
				IPTags:                      convertIPTagMapToSlice(getIPTagMap(ipTagString)),
			}
		}
	}
	return serviceIPTagRequest{
		IPTagsRequestedByAnnotation: false,
		IPTags:                      nil,
	}
}

// getIPTagMap parses a comma-separated "key=value" annotation string into a map.
func getIPTagMap(ipTagString string) map[string]string {
	outputMap := make(map[string]string)
	commaDelimitedPairs := strings.Split(strings.TrimSpace(ipTagString), ",")
	for _, commaDelimitedPair := range commaDelimitedPairs {
		splitKeyValue := strings.Split(commaDelimitedPair, "=")

		// Include only valid pairs in the return value
		// Last Write wins.
if len(splitKeyValue) == 2 { tagKey := strings.TrimSpace(splitKeyValue[0]) tagValue := strings.TrimSpace(splitKeyValue[1]) outputMap[tagKey] = tagValue } } return outputMap } func sortIPTags(ipTags *[]network.IPTag) { if ipTags != nil { sort.Slice(*ipTags, func(i, j int) bool { ipTag := *ipTags return to.String(ipTag[i].IPTagType) < to.String(ipTag[j].IPTagType) || to.String(ipTag[i].Tag) < to.String(ipTag[j].Tag) }) } } func areIPTagsEquivalent(ipTags1 *[]network.IPTag, ipTags2 *[]network.IPTag) bool { sortIPTags(ipTags1) sortIPTags(ipTags2) if ipTags1 == nil { ipTags1 = &[]network.IPTag{} } if ipTags2 == nil { ipTags2 = &[]network.IPTag{} } return reflect.DeepEqual(ipTags1, ipTags2) } func convertIPTagMapToSlice(ipTagMap map[string]string) *[]network.IPTag { if ipTagMap == nil { return nil } if len(ipTagMap) == 0 { return &[]network.IPTag{} } outputTags := []network.IPTag{} for k, v := range ipTagMap { ipTag := network.IPTag{ IPTagType: to.StringPtr(k), Tag: to.StringPtr(v), } outputTags = append(outputTags, ipTag) } return &outputTags } func getDomainNameLabel(pip *network.PublicIPAddress) string { if pip == nil || pip.PublicIPAddressPropertiesFormat == nil || pip.PublicIPAddressPropertiesFormat.DNSSettings == nil { return "" } return to.String(pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel) } func getIdleTimeout(s *v1.Service) (*int32, error) { const ( min = 4 max = 30 ) val, ok := s.Annotations[ServiceAnnotationLoadBalancerIdleTimeout] if !ok { // Return a nil here as this will set the value to the azure default return nil, nil } errInvalidTimeout := fmt.Errorf("idle timeout value must be a whole number representing minutes between %d and %d", min, max) to, err := strconv.Atoi(val) if err != nil { return nil, fmt.Errorf("error parsing idle timeout value: %v: %v", err, errInvalidTimeout) } to32 := int32(to) if to32 < min || to32 > max { return nil, errInvalidTimeout } return &to32, nil } func (az *Cloud) isFrontendIPChanged(clusterName string, 
config network.FrontendIPConfiguration, service *v1.Service, lbFrontendIPConfigName string) (bool, error) { isServiceOwnsFrontendIP, isPrimaryService, err := az.serviceOwnsFrontendIP(config, service) if err != nil { return false, err } if isServiceOwnsFrontendIP && isPrimaryService && !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) { return true, nil } if !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) { return false, nil } loadBalancerIP := service.Spec.LoadBalancerIP isInternal := requiresInternalLoadBalancer(service) if isInternal { // Judge subnet subnetName := subnet(service) if subnetName != nil { subnet, existsSubnet, err := az.getSubnet(az.VnetName, *subnetName) if err != nil { return false, err } if !existsSubnet { return false, fmt.Errorf("failed to get subnet") } if config.Subnet != nil && !strings.EqualFold(to.String(config.Subnet.Name), to.String(subnet.Name)) { return true, nil } } if loadBalancerIP == "" { return config.PrivateIPAllocationMethod == network.Static, nil } return config.PrivateIPAllocationMethod != network.Static || !strings.EqualFold(loadBalancerIP, to.String(config.PrivateIPAddress)), nil } pipName, _, err := az.determinePublicIPName(clusterName, service) if err != nil { return false, err } pipResourceGroup := az.getPublicIPAddressResourceGroup(service) pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName) if err != nil { return false, err } if !existsPip { return true, nil } return config.PublicIPAddress != nil && !strings.EqualFold(to.String(pip.ID), to.String(config.PublicIPAddress.ID)), nil } // isFrontendIPConfigUnsafeToDelete checks if a frontend IP config is safe to be deleted. // It is safe to be deleted if and only if there is no reference from other // loadBalancing resources, including loadBalancing rules, outbound rules, inbound NAT rules // and inbound NAT pools. 
func (az *Cloud) isFrontendIPConfigUnsafeToDelete( lb *network.LoadBalancer, service *v1.Service, fipConfigID *string, ) (bool, error) { if lb == nil || fipConfigID == nil || *fipConfigID == "" { return false, fmt.Errorf("isFrontendIPConfigUnsafeToDelete: incorrect parameters") } var ( lbRules []network.LoadBalancingRule outboundRules []network.OutboundRule inboundNatRules []network.InboundNatRule inboundNatPools []network.InboundNatPool unsafe bool ) if lb.LoadBalancerPropertiesFormat != nil { if lb.LoadBalancingRules != nil { lbRules = *lb.LoadBalancingRules } if lb.OutboundRules != nil { outboundRules = *lb.OutboundRules } if lb.InboundNatRules != nil { inboundNatRules = *lb.InboundNatRules } if lb.InboundNatPools != nil { inboundNatPools = *lb.InboundNatPools } } // check if there are load balancing rules from other services // referencing this frontend IP configuration for _, lbRule := range lbRules { if lbRule.LoadBalancingRulePropertiesFormat != nil && lbRule.FrontendIPConfiguration != nil && lbRule.FrontendIPConfiguration.ID != nil && strings.EqualFold(*lbRule.FrontendIPConfiguration.ID, *fipConfigID) { if !az.serviceOwnsRule(service, *lbRule.Name) { warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by load balancing rules of other services", *fipConfigID, *lb.Name) klog.Warning(warningMsg) az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg) unsafe = true break } } } // check if there are outbound rules // referencing this frontend IP configuration for _, outboundRule := range outboundRules { if outboundRule.OutboundRulePropertiesFormat != nil && outboundRule.FrontendIPConfigurations != nil { outboundRuleFIPConfigs := *outboundRule.FrontendIPConfigurations if found := findMatchedOutboundRuleFIPConfig(fipConfigID, outboundRuleFIPConfigs); found { warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP 
configuration with ID %s on LB %s cannot be deleted because it is being referenced by the outbound rule %s", *fipConfigID, *lb.Name, *outboundRule.Name) klog.Warning(warningMsg) az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg) unsafe = true break } } } // check if there are inbound NAT rules // referencing this frontend IP configuration for _, inboundNatRule := range inboundNatRules { if inboundNatRule.InboundNatRulePropertiesFormat != nil && inboundNatRule.FrontendIPConfiguration != nil && inboundNatRule.FrontendIPConfiguration.ID != nil && strings.EqualFold(*inboundNatRule.FrontendIPConfiguration.ID, *fipConfigID) { warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT rule %s", *fipConfigID, *lb.Name, *inboundNatRule.Name) klog.Warning(warningMsg) az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg) unsafe = true break } } // check if there are inbound NAT pools // referencing this frontend IP configuration for _, inboundNatPool := range inboundNatPools { if inboundNatPool.InboundNatPoolPropertiesFormat != nil && inboundNatPool.FrontendIPConfiguration != nil && inboundNatPool.FrontendIPConfiguration.ID != nil && strings.EqualFold(*inboundNatPool.FrontendIPConfiguration.ID, *fipConfigID) { warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT pool %s", *fipConfigID, *lb.Name, *inboundNatPool.Name) klog.Warning(warningMsg) az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg) unsafe = true break } } return unsafe, nil } func findMatchedOutboundRuleFIPConfig(fipConfigID *string, outboundRuleFIPConfigs []network.SubResource) bool { var found bool for _, config := range outboundRuleFIPConfigs { if config.ID != nil && 
strings.EqualFold(*config.ID, *fipConfigID) {
			found = true
		}
	}
	return found
}

// findFrontendIPConfigOfService returns the frontend IP configuration owned by
// the service (per serviceOwnsFrontendIP) together with whether the service is
// the primary owner, or (nil, false, nil) when the service owns none.
func (az *Cloud) findFrontendIPConfigOfService(
	fipConfigs *[]network.FrontendIPConfiguration,
	service *v1.Service,
) (*network.FrontendIPConfiguration, bool, error) {
	for _, config := range *fipConfigs {
		owns, isPrimaryService, err := az.serviceOwnsFrontendIP(config, service)
		if err != nil {
			return nil, false, err
		}
		if owns {
			return &config, isPrimaryService, nil
		}
	}

	return nil, false, nil
}

// nodeNameInNodes reports whether a node with the given name (compared
// case-insensitively) is present in the node list.
func nodeNameInNodes(nodeName string, nodes []*v1.Node) bool {
	for _, node := range nodes {
		if strings.EqualFold(nodeName, node.Name) {
			return true
		}
	}
	return false
}

// reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
// nodes only used if wantLb is true
func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
	isInternal := requiresInternalLoadBalancer(service)
	isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service)
	serviceName := getServiceName(service)
	klog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)
	lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb)
	if err != nil {
		klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
		return nil, err
	}
	lbName := *lb.Name
	lbResourceGroup := az.getLoadBalancerResourceGroup()
	klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s/%s) wantLb(%t) resolved load balancer name", serviceName, lbResourceGroup, lbName, wantLb)
	defaultLBFrontendIPConfigName := az.getDefaultFrontendIPConfigName(service)
	defaultLBFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbResourceGroup, defaultLBFrontendIPConfigName)
	lbBackendPoolName :=
getBackendPoolName(clusterName, service) lbBackendPoolID := az.getBackendPoolID(lbName, lbResourceGroup, lbBackendPoolName) lbIdleTimeout, err := getIdleTimeout(service) if wantLb && err != nil { return nil, err } dirtyLb := false // Ensure LoadBalancer's Backend Pool Configuration if wantLb { newBackendPools := []network.BackendAddressPool{} if lb.BackendAddressPools != nil { newBackendPools = *lb.BackendAddressPools } foundBackendPool := false for _, bp := range newBackendPools { if strings.EqualFold(*bp.Name, lbBackendPoolName) { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) foundBackendPool = true var backendIPConfigurationsToBeDeleted []network.InterfaceIPConfiguration if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil { for _, ipConf := range *bp.BackendIPConfigurations { ipConfID := to.String(ipConf.ID) nodeName, _, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfID) if err != nil { return nil, err } // If a node is not supposed to be included in the LB, it // would not be in the `nodes` slice. We need to check the nodes that // have been added to the LB's backendpool, find the unwanted ones and // delete them from the pool. 
if !nodeNameInNodes(nodeName, nodes) { klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName) // construct a backendPool that only contains the IP config of the node to be deleted backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)}) } } if len(backendIPConfigurationsToBeDeleted) > 0 { backendpoolToBeDeleted := &[]network.BackendAddressPool{ { ID: to.StringPtr(lbBackendPoolID), BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{ BackendIPConfigurations: &backendIPConfigurationsToBeDeleted, }, }, } vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) // decouple the backendPool from the node err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted) if err != nil { return nil, err } } } break } else { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name) } } if !foundBackendPool { if isBackendPoolPreConfigured { klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - PreConfiguredBackendPoolLoadBalancerTypes %s has been set but can not find corresponding backend pool, ignoring it", serviceName, wantLb, az.PreConfiguredBackendPoolLoadBalancerTypes) isBackendPoolPreConfigured = false } newBackendPools = append(newBackendPools, network.BackendAddressPool{ Name: to.StringPtr(lbBackendPoolName), }) klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) dirtyLb = true lb.BackendAddressPools = &newBackendPools } } // Ensure LoadBalancer's Frontend IP Configurations dirtyConfigs := false newConfigs := []network.FrontendIPConfiguration{} if lb.FrontendIPConfigurations != nil { newConfigs = *lb.FrontendIPConfigurations } var 
ownedFIPConfig *network.FrontendIPConfiguration if !wantLb { for i := len(newConfigs) - 1; i >= 0; i-- { config := newConfigs[i] isServiceOwnsFrontendIP, _, err := az.serviceOwnsFrontendIP(config, service) if err != nil { return nil, err } if isServiceOwnsFrontendIP { unsafe, err := az.isFrontendIPConfigUnsafeToDelete(lb, service, config.ID) if err != nil { return nil, err } // If the frontend IP configuration is not being referenced by: // 1. loadBalancing rules of other services with different ports; // 2. outbound rules; // 3. inbound NAT rules; // 4. inbound NAT pools, // do the deletion, or skip it. if !unsafe { var configNameToBeDeleted string if newConfigs[i].Name != nil { configNameToBeDeleted = *newConfigs[i].Name klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, configNameToBeDeleted) } else { klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): nil name of lb frontendconfig", serviceName, wantLb) } newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) dirtyConfigs = true } } } } else { for i := len(newConfigs) - 1; i >= 0; i-- { config := newConfigs[i] isFipChanged, err := az.isFrontendIPChanged(clusterName, config, service, defaultLBFrontendIPConfigName) if err != nil { return nil, err } if isFipChanged { klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) dirtyConfigs = true } } ownedFIPConfig, _, err = az.findFrontendIPConfigOfService(&newConfigs, service) if err != nil { return nil, err } if ownedFIPConfig == nil { klog.V(4).Infof("ensure(%s): lb(%s) - creating a new frontend IP config", serviceName, lbName) // construct FrontendIPConfigurationPropertiesFormat var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat if isInternal { // azure does not support ILB for IPv6 yet. 
// TODO: remove this check when ILB supports IPv6 *and* the SDK // have been rev'ed to 2019* version if utilnet.IsIPv6String(service.Spec.ClusterIP) { return nil, fmt.Errorf("ensure(%s): lb(%s) - internal load balancers does not support IPv6", serviceName, lbName) } subnetName := subnet(service) if subnetName == nil { subnetName = &az.SubnetName } subnet, existsSubnet, err := az.getSubnet(az.VnetName, *subnetName) if err != nil { return nil, err } if !existsSubnet { return nil, fmt.Errorf("ensure(%s): lb(%s) - failed to get subnet: %s/%s", serviceName, lbName, az.VnetName, az.SubnetName) } configProperties := network.FrontendIPConfigurationPropertiesFormat{ Subnet: &subnet, } loadBalancerIP := service.Spec.LoadBalancerIP if loadBalancerIP != "" { configProperties.PrivateIPAllocationMethod = network.Static configProperties.PrivateIPAddress = &loadBalancerIP } else { // We'll need to call GetLoadBalancer later to retrieve allocated IP. configProperties.PrivateIPAllocationMethod = network.Dynamic } fipConfigurationProperties = &configProperties } else { pipName, shouldPIPExisted, err := az.determinePublicIPName(clusterName, service) if err != nil { return nil, err } domainNameLabel, found := getPublicIPDomainNameLabel(service) pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel, clusterName, shouldPIPExisted, found) if err != nil { return nil, err } fipConfigurationProperties = &network.FrontendIPConfigurationPropertiesFormat{ PublicIPAddress: &network.PublicIPAddress{ID: pip.ID}, } } newConfigs = append(newConfigs, network.FrontendIPConfiguration{ Name: to.StringPtr(defaultLBFrontendIPConfigName), ID: to.StringPtr(fmt.Sprintf(frontendIPConfigIDTemplate, az.SubscriptionID, az.ResourceGroup, *lb.Name, defaultLBFrontendIPConfigName)), FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties, }) klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, defaultLBFrontendIPConfigName) 
dirtyConfigs = true } } if dirtyConfigs { dirtyLb = true lb.FrontendIPConfigurations = &newConfigs } // update probes/rules if ownedFIPConfig != nil { if ownedFIPConfig.ID != nil { defaultLBFrontendIPConfigID = *ownedFIPConfig.ID } else { return nil, fmt.Errorf("reconcileLoadBalancer for service (%s)(%t): nil ID for frontend IP config", serviceName, wantLb) } } if wantLb { err = az.checkLoadBalancerResourcesConflicted(lb, defaultLBFrontendIPConfigID, service) if err != nil { return nil, err } } expectedProbes, expectedRules, err := az.reconcileLoadBalancerRule(service, wantLb, defaultLBFrontendIPConfigID, lbBackendPoolID, lbName, lbIdleTimeout) if err != nil { return nil, err } // remove unwanted probes dirtyProbes := false var updatedProbes []network.Probe if lb.Probes != nil { updatedProbes = *lb.Probes } for i := len(updatedProbes) - 1; i >= 0; i-- { existingProbe := updatedProbes[i] if az.serviceOwnsRule(service, *existingProbe.Name) { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) keepProbe := false if findProbe(expectedProbes, existingProbe) { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) keepProbe = true } if !keepProbe { updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...) 
klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) dirtyProbes = true } } } // add missing, wanted probes for _, expectedProbe := range expectedProbes { foundProbe := false if findProbe(updatedProbes, expectedProbe) { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) foundProbe = true } if !foundProbe { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) updatedProbes = append(updatedProbes, expectedProbe) dirtyProbes = true } } if dirtyProbes { dirtyLb = true lb.Probes = &updatedProbes } // update rules dirtyRules := false var updatedRules []network.LoadBalancingRule if lb.LoadBalancingRules != nil { updatedRules = *lb.LoadBalancingRules } // update rules: remove unwanted for i := len(updatedRules) - 1; i >= 0; i-- { existingRule := updatedRules[i] if az.serviceOwnsRule(service, *existingRule.Name) { keepRule := false klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) if findRule(expectedRules, existingRule, wantLb) { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) keepRule = true } if !keepRule { klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) updatedRules = append(updatedRules[:i], updatedRules[i+1:]...) 
dirtyRules = true } } } // update rules: add needed for _, expectedRule := range expectedRules { foundRule := false if findRule(updatedRules, expectedRule, wantLb) { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) foundRule = true } if !foundRule { klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) updatedRules = append(updatedRules, expectedRule) dirtyRules = true } } if dirtyRules { dirtyLb = true lb.LoadBalancingRules = &updatedRules } changed := az.ensureLoadBalancerTagged(lb) if changed { dirtyLb = true } // We don't care if the LB exists or not // We only care about if there is any change in the LB, which means dirtyLB // If it is not exist, and no change to that, we don't CreateOrUpdate LB if dirtyLb { if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 { if isBackendPoolPreConfigured { klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - ignore cleanup of dirty lb because the lb is pre-configured", serviceName, lbName) } else { // When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself, // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. 
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) klog.V(10).Infof("EnsureBackendPoolDeleted(%s,%s) for service %s: start", lbBackendPoolID, vmSetName, serviceName) if _, ok := az.VMSet.(*availabilitySet); ok { // do nothing for availability set lb.BackendAddressPools = nil } err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) if err != nil { klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err) return nil, err } klog.V(10).Infof("EnsureBackendPoolDeleted(%s) for service %s: end", lbBackendPoolID, serviceName) // Remove the LB. klog.V(10).Infof("reconcileLoadBalancer: az.DeleteLB(%q): start", lbName) err = az.DeleteLB(service, lbName) if err != nil { klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) return nil, err } klog.V(10).Infof("az.DeleteLB(%q): end", lbName) } } else { klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) err := az.CreateOrUpdateLB(service, *lb) if err != nil { klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName) return nil, err } if isInternal { // Refresh updated lb which will be used later in other places. newLB, exist, err := az.getAzureLoadBalancer(lbName, azcache.CacheReadTypeDefault) if err != nil { klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) return nil, err } if !exist { return nil, fmt.Errorf("load balancer %q not found", lbName) } lb = &newLB } } } if wantLb && nodes != nil && !isBackendPoolPreConfigured { // Add the machines to the backend pool if they're not already vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) // Etag would be changed when updating backend pools, so invalidate lbCache after it. 
// NOTE(review): the statements below are the tail of reconcileLoadBalancer,
// whose definition begins before this chunk; code left unchanged.
defer az.lbCache.Delete(lbName)
err := az.VMSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
if err != nil {
	return nil, err
}
}

klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
return lb, nil
}

// checkLoadBalancerResourcesConflicted checks if the service is consuming
// ports which are conflicted with the existing loadBalancer resources,
// including inbound NAT rule, inbound NAT pools and loadBalancing rules.
//
// A conflict exists when another resource on the load balancer shares the same
// frontend IP configuration and protocol and covers one of the service's
// ports. Returns nil when there is no conflict; otherwise an error naming the
// conflicting resource.
func (az *Cloud) checkLoadBalancerResourcesConflicted(
	lb *network.LoadBalancer,
	frontendIPConfigID string,
	service *v1.Service,
) error {
	if service.Spec.Ports == nil {
		return nil
	}
	ports := service.Spec.Ports

	for _, port := range ports {
		// 1) conflicts with existing load-balancing rules on the same frontend.
		if lb.LoadBalancingRules != nil {
			for _, rule := range *lb.LoadBalancingRules {
				if rule.LoadBalancingRulePropertiesFormat != nil &&
					rule.FrontendIPConfiguration != nil &&
					rule.FrontendIPConfiguration.ID != nil &&
					strings.EqualFold(*rule.FrontendIPConfiguration.ID, frontendIPConfigID) &&
					strings.EqualFold(string(rule.Protocol), string(port.Protocol)) &&
					rule.FrontendPort != nil &&
					*rule.FrontendPort == port.Port {
					// ignore self-owned rules for unit test
					if rule.Name != nil && az.serviceOwnsRule(service, *rule.Name) {
						continue
					}
					return fmt.Errorf("checkLoadBalancerResourcesConflicted: service port %s is trying to "+
						"consume the port %d which is being referenced by an existing loadBalancing rule %s with "+
						"the same protocol %s and frontend IP config with ID %s",
						port.Name, *rule.FrontendPort, *rule.Name, rule.Protocol, *rule.FrontendIPConfiguration.ID)
				}
			}
		}

		// 2) conflicts with existing inbound NAT rules on the same frontend.
		if lb.InboundNatRules != nil {
			for _, inboundNatRule := range *lb.InboundNatRules {
				if inboundNatRule.InboundNatRulePropertiesFormat != nil &&
					inboundNatRule.FrontendIPConfiguration != nil &&
					inboundNatRule.FrontendIPConfiguration.ID != nil &&
					strings.EqualFold(*inboundNatRule.FrontendIPConfiguration.ID, frontendIPConfigID) &&
					strings.EqualFold(string(inboundNatRule.Protocol), string(port.Protocol)) &&
					inboundNatRule.FrontendPort != nil &&
					*inboundNatRule.FrontendPort == port.Port {
					return fmt.Errorf("checkLoadBalancerResourcesConflicted: service port %s is trying to "+
						"consume the port %d which is being referenced by an existing inbound NAT rule %s with "+
						"the same protocol %s and frontend IP config with ID %s",
						port.Name, *inboundNatRule.FrontendPort, *inboundNatRule.Name, inboundNatRule.Protocol, *inboundNatRule.FrontendIPConfiguration.ID)
				}
			}
		}

		// 3) conflicts with inbound NAT pools whose port range covers this port.
		if lb.InboundNatPools != nil {
			for _, pool := range *lb.InboundNatPools {
				if pool.InboundNatPoolPropertiesFormat != nil &&
					pool.FrontendIPConfiguration != nil &&
					pool.FrontendIPConfiguration.ID != nil &&
					strings.EqualFold(*pool.FrontendIPConfiguration.ID, frontendIPConfigID) &&
					strings.EqualFold(string(pool.Protocol), string(port.Protocol)) &&
					pool.FrontendPortRangeStart != nil &&
					pool.FrontendPortRangeEnd != nil &&
					*pool.FrontendPortRangeStart <= port.Port &&
					*pool.FrontendPortRangeEnd >= port.Port {
					return fmt.Errorf("checkLoadBalancerResourcesConflicted: service port %s is trying to "+
						"consume the port %d which is being in the range (%d-%d) of an existing "+
						"inbound NAT pool %s with the same protocol %s and frontend IP config with ID %s",
						port.Name, port.Port, *pool.FrontendPortRangeStart, *pool.FrontendPortRangeEnd, *pool.Name, pool.Protocol, *pool.FrontendIPConfiguration.ID)
				}
			}
		}
	}

	return nil
}

// parseHealthProbeProtocolAndPath returns the probe protocol and request path
// configured through the health-probe service annotations. Both results are
// empty when the protocol annotation is absent; the request path is only
// honored for HTTP/HTTPS probes.
func parseHealthProbeProtocolAndPath(service *v1.Service) (string, string) {
	var protocol, path string
	if v, ok := service.Annotations[ServiceAnnotationLoadBalancerHealthProbeProtocol]; ok {
		protocol = v
	} else {
		return protocol, path
	}
	// ignore the request path if using TCP
	if strings.EqualFold(protocol, string(network.ProbeProtocolHTTP)) ||
		strings.EqualFold(protocol, string(network.ProbeProtocolHTTPS)) {
		if v, ok := service.Annotations[ServiceAnnotationLoadBalancerHealthProbeRequestPath]; ok {
			path = v
		}
	}
	return protocol, path
}

// reconcileLoadBalancerRule computes the health probes and load-balancing
// rules the given service should have on the load balancer named lbName.
// When wantLb is false the port list is treated as empty, so the expected
// sets come back empty and the caller drops all service-owned probes/rules.
func (az *Cloud) reconcileLoadBalancerRule(
	service *v1.Service,
	wantLb bool,
	lbFrontendIPConfigID string,
	lbBackendPoolID string,
	lbName string,
	lbIdleTimeout *int32) ([]network.Probe, []network.LoadBalancingRule, error) {
	var ports []v1.ServicePort
	if wantLb {
		ports = service.Spec.Ports
	} else {
		ports = []v1.ServicePort{}
	}

	var enableTCPReset *bool
	if az.useStandardLoadBalancer() {
		// TCP reset is always on for Standard SKU; the old opt-out annotation is ignored.
		enableTCPReset = to.BoolPtr(true)
		if _, ok := service.Annotations[ServiceAnnotationLoadBalancerDisableTCPReset]; ok {
			klog.Warning("annotation service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset has been removed as of Kubernetes 1.20. TCP Resets are always enabled on Standard SKU load balancers.")
		}
	}

	var expectedProbes []network.Probe
	var expectedRules []network.LoadBalancingRule
	for _, port := range ports {
		lbRuleName := az.getLoadBalancerRuleName(service, port.Protocol, port.Port)
		klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName)

		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return expectedProbes, expectedRules, err
		}

		probeProtocol, requestPath := parseHealthProbeProtocolAndPath(service)
		if servicehelpers.NeedsHealthCheck(service) {
			// externalTrafficPolicy: Local — probe the kube-proxy health-check endpoint.
			podPresencePath, podPresencePort := servicehelpers.GetServiceHealthCheckPathPort(service)
			if probeProtocol == "" {
				probeProtocol = string(network.ProbeProtocolHTTP)
			}
			if requestPath == "" {
				requestPath = podPresencePath
			}
			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					RequestPath:       to.StringPtr(requestPath),
					Protocol:          network.ProbeProtocol(probeProtocol),
					Port:              to.Int32Ptr(podPresencePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		} else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
			// we only add the expected probe if we're doing TCP
			if probeProtocol == "" {
				probeProtocol = string(*probeProto)
			}
			var actualPath *string
			if !strings.EqualFold(probeProtocol, string(network.ProbeProtocolTCP)) {
				if requestPath != "" {
					actualPath = to.StringPtr(requestPath)
				} else {
					actualPath = to.StringPtr("/healthz")
				}
			}
			expectedProbes = append(expectedProbes, network.Probe{
				Name: &lbRuleName,
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Protocol:          network.ProbeProtocol(probeProtocol),
					RequestPath:       actualPath,
					Port:              to.Int32Ptr(port.NodePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			})
		}

		loadDistribution := network.LoadDistributionDefault
		if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
			loadDistribution = network.LoadDistributionSourceIP
		}

		expectedRule := network.LoadBalancingRule{
			Name: &lbRuleName,
			LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
				Protocol: *transportProto,
				FrontendIPConfiguration: &network.SubResource{
					ID: to.StringPtr(lbFrontendIPConfigID),
				},
				BackendAddressPool: &network.SubResource{
					ID: to.StringPtr(lbBackendPoolID),
				},
				LoadDistribution:    loadDistribution,
				FrontendPort:        to.Int32Ptr(port.Port),
				BackendPort:         to.Int32Ptr(port.Port),
				DisableOutboundSnat: to.BoolPtr(az.disableLoadBalancerOutboundSNAT()),
				EnableTCPReset:      enableTCPReset,
				EnableFloatingIP:    to.BoolPtr(true),
			},
		}
		if port.Protocol == v1.ProtocolTCP {
			expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout
		}

		// HA-ports mode: a single rule with port 0 / protocol All covers everything
		// (internal Standard-SKU load balancers only).
		if requiresInternalLoadBalancer(service) &&
			strings.EqualFold(az.LoadBalancerSku, loadBalancerSkuStandard) &&
			strings.EqualFold(service.Annotations[ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts], "true") {
			expectedRule.FrontendPort = to.Int32Ptr(0)
			expectedRule.BackendPort = to.Int32Ptr(0)
			expectedRule.Protocol = network.TransportProtocolAll
		}

		// we didn't construct the probe objects for UDP or SCTP because they're not allowed on Azure.
		// However, when externalTrafficPolicy is Local, Kubernetes HTTP health check would be used for probing.
		// Attach the matching probe where one was constructed above.
		if servicehelpers.NeedsHealthCheck(service) || (port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP) {
			expectedRule.Probe = &network.SubResource{
				ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), lbRuleName)),
			}
		}

		expectedRules = append(expectedRules, expectedRule)
	}

	return expectedProbes, expectedRules, nil
}

// This reconciles the Network Security Group similar to how the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
	serviceName := getServiceName(service)
	klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)

	ports := service.Spec.Ports
	if ports == nil {
		if useSharedSecurityRule(service) {
			klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name)
			return nil, fmt.Errorf("no port info for reconciling shared rule for service %s", service.Name)
		}
		ports = []v1.ServicePort{}
	}

	sg, err := az.getSecurityGroup(azcache.CacheReadTypeDefault)
	if err != nil {
		return nil, err
	}

	destinationIPAddress := ""
	if wantLb && lbIP == nil {
		return nil, fmt.Errorf("no load balancer IP for setting up security rules for service %s", service.Name)
	}
	if lbIP != nil {
		destinationIPAddress = *lbIP
	}

	if destinationIPAddress == "" {
		// No LB IP known (e.g. wantLb is false): match any destination.
		destinationIPAddress = "*"
	}

	sourceRanges, err := servicehelpers.GetLoadBalancerSourceRanges(service)
	if err != nil {
		return nil, err
	}
	serviceTags := getServiceTags(service)
	if len(serviceTags) != 0 {
		// Service tags replace the default 0.0.0.0/0 source range.
		if _, ok := sourceRanges[defaultLoadBalancerSourceRanges]; ok {
			delete(sourceRanges, defaultLoadBalancerSourceRanges)
		}
	}

	var sourceAddressPrefixes []string
	if (sourceRanges == nil || servicehelpers.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 {
		// Only open to "Internet" for externally exposed services.
		if !requiresInternalLoadBalancer(service) {
			sourceAddressPrefixes = []string{"Internet"}
		}
	} else {
		for _, ip := range sourceRanges {
			sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
		}
		sourceAddressPrefixes = append(sourceAddressPrefixes, serviceTags...)
	}

	expectedSecurityRules := []network.SecurityRule{}

	if wantLb {
		// One allow-inbound rule per (port, source prefix) pair.
		expectedSecurityRules = make([]network.SecurityRule, len(ports)*len(sourceAddressPrefixes))

		for i, port := range ports {
			_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
			if err != nil {
				return nil, err
			}
			for j := range sourceAddressPrefixes {
				ix := i*len(sourceAddressPrefixes) + j
				securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j])
				expectedSecurityRules[ix] = network.SecurityRule{
					Name: to.StringPtr(securityRuleName),
					SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
						Protocol:                 *securityProto,
						SourcePortRange:          to.StringPtr("*"),
						DestinationPortRange:     to.StringPtr(strconv.Itoa(int(port.Port))),
						SourceAddressPrefix:      to.StringPtr(sourceAddressPrefixes[j]),
						DestinationAddressPrefix: to.StringPtr(destinationIPAddress),
						Access:                   network.SecurityRuleAccessAllow,
						Direction:                network.SecurityRuleDirectionInbound,
					},
				}
			}
		}
	}

	for _, r := range expectedSecurityRules {
		klog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange)
	}

	// update security rules
	dirtySg := false
	var updatedRules []network.SecurityRule
	if sg.SecurityGroupPropertiesFormat != nil && sg.SecurityGroupPropertiesFormat.SecurityRules != nil {
		updatedRules = *sg.SecurityGroupPropertiesFormat.SecurityRules
	}

	for _, r := range updatedRules {
		klog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange))
	}

	// update security rules: remove unwanted rules that belong privately
	// to this service
	for i := len(updatedRules) - 1; i >= 0; i-- {
		existingRule := updatedRules[i]
		if az.serviceOwnsRule(service, *existingRule.Name) {
			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
			keepRule := false
			if findSecurityRule(expectedSecurityRules, existingRule) {
				klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
				keepRule = true
			}
			if !keepRule {
				klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
				dirtySg = true
			}
		}
	}

	// update security rules: if the service uses a shared rule and is being deleted,
	// then remove it from the shared rule
	if useSharedSecurityRule(service) && !wantLb {
		for _, port := range ports {
			for _, sourceAddressPrefix := range sourceAddressPrefixes {
				sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix)
				sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
				if !sharedRuleFound {
					klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
					return nil, fmt.Errorf("expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
				}
				if sharedRule.DestinationAddressPrefixes == nil {
					klog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
					return nil, fmt.Errorf("expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
				}
				existingPrefixes := *sharedRule.DestinationAddressPrefixes
				addressIndex, found := findIndex(existingPrefixes, destinationIPAddress)
				if !found {
					klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
					return nil, fmt.Errorf("expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
				}
				if len(existingPrefixes) == 1 {
					// Last destination: drop the whole shared rule.
					updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...)
				} else {
					// Otherwise just remove our destination from the shared rule.
					newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...)
					sharedRule.DestinationAddressPrefixes = &newDestinations
					updatedRules[sharedIndex] = sharedRule
				}
				dirtySg = true
			}
		}
	}

	// update security rules: prepare rules for consolidation
	for index, rule := range updatedRules {
		if allowsConsolidation(rule) {
			updatedRules[index] = makeConsolidatable(rule)
		}
	}
	for index, rule := range expectedSecurityRules {
		if allowsConsolidation(rule) {
			expectedSecurityRules[index] = makeConsolidatable(rule)
		}
	}

	// update security rules: add needed
	for _, expectedRule := range expectedSecurityRules {
		foundRule := false
		if findSecurityRule(updatedRules, expectedRule) {
			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
			foundRule = true
		}
		if foundRule && allowsConsolidation(expectedRule) {
			index, _ := findConsolidationCandidate(updatedRules, expectedRule)
			updatedRules[index] = consolidate(updatedRules[index], expectedRule)
			dirtySg = true
		}
		if !foundRule {
			klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)
			nextAvailablePriority, err := getNextAvailablePriority(updatedRules)
			if err != nil {
				return nil, err
			}
			expectedRule.Priority = to.Int32Ptr(nextAvailablePriority)
			updatedRules = append(updatedRules, expectedRule)
			dirtySg = true
		}
	}

	for _, r := range updatedRules {
		klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes),
logSafe(r.DestinationPortRange))
	}

	changed := az.ensureSecurityGroupTagged(&sg)
	if changed {
		dirtySg = true
	}

	if dirtySg {
		sg.SecurityRules = &updatedRules
		klog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
		klog.V(10).Infof("CreateOrUpdateSecurityGroup(%q): start", *sg.Name)
		err := az.CreateOrUpdateSecurityGroup(sg)
		if err != nil {
			klog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name)
			return nil, err
		}
		klog.V(10).Infof("CreateOrUpdateSecurityGroup(%q): end", *sg.Name)
		az.nsgCache.Delete(to.String(sg.Name))
	}
	return &sg, nil
}

// shouldUpdateLoadBalancer reports whether the service's load balancer should
// be refreshed: the LB must already exist and the service must not be being
// deleted.
func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service) bool {
	_, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nil, false)
	return existsLb && service.ObjectMeta.DeletionTimestamp == nil
}

// logSafe renders a possibly-nil string pointer for logging.
func logSafe(s *string) string {
	if s != nil {
		return *s
	}
	return "(nil)"
}

// logSafeCollection renders either a single value or a collection for
// logging, preferring the single value when it is set.
func logSafeCollection(s *string, strs *[]string) string {
	if s != nil {
		return *s
	}
	if strs == nil {
		return "(nil)"
	}
	return "[" + strings.Join(*strs, ",") + "]"
}

// findSecurityRuleByName looks a rule up by case-insensitive name, returning
// its index, the rule and whether it was found.
func findSecurityRuleByName(rules []network.SecurityRule, ruleName string) (int, network.SecurityRule, bool) {
	for idx, candidate := range rules {
		if candidate.Name != nil && strings.EqualFold(*candidate.Name, ruleName) {
			return idx, candidate, true
		}
	}
	return 0, network.SecurityRule{}, false
}

// findIndex returns the position of s in strs, compared case-insensitively.
func findIndex(strs []string, s string) (int, bool) {
	for idx, candidate := range strs {
		if strings.EqualFold(candidate, s) {
			return idx, true
		}
	}
	return 0, false
}

// allowsConsolidation reports whether a rule participates in shared-rule
// consolidation, signalled by a "shared" name prefix.
func allowsConsolidation(rule network.SecurityRule) bool {
	return strings.HasPrefix(to.String(rule.Name), "shared")
}

// findConsolidationCandidate returns the index of a consolidatable rule whose
// name matches rule's name case-insensitively.
func findConsolidationCandidate(rules []network.SecurityRule, rule network.SecurityRule) (int, bool) {
	for idx, candidate := range rules {
		if allowsConsolidation(candidate) && strings.EqualFold(to.String(candidate.Name), to.String(rule.Name)) {
			return idx, true
		}
	}
	return 0, false
}

// makeConsolidatable normalizes a rule so its destinations are always carried
// in the plural DestinationAddressPrefixes field, making two rules mergeable.
func makeConsolidatable(rule network.SecurityRule) network.SecurityRule {
	return network.SecurityRule{
		Name: rule.Name,
		SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
			Priority:                   rule.Priority,
			Protocol:                   rule.Protocol,
			SourcePortRange:            rule.SourcePortRange,
			SourcePortRanges:           rule.SourcePortRanges,
			DestinationPortRange:       rule.DestinationPortRange,
			DestinationPortRanges:      rule.DestinationPortRanges,
			SourceAddressPrefix:        rule.SourceAddressPrefix,
			SourceAddressPrefixes:      rule.SourceAddressPrefixes,
			DestinationAddressPrefixes: collectionOrSingle(rule.DestinationAddressPrefixes, rule.DestinationAddressPrefix),
			Access:                     rule.Access,
			Direction:                  rule.Direction,
		},
	}
}

// consolidate merges newRule's destination prefixes into existingRule.
func consolidate(existingRule network.SecurityRule, newRule network.SecurityRule) network.SecurityRule {
	destinations := appendElements(existingRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes, newRule.DestinationAddressPrefix, newRule.DestinationAddressPrefixes)
	// there are transient conditions during controller startup where it tries to add a service that is already added
	destinations = deduplicate(destinations)

	return network.SecurityRule{
		Name: existingRule.Name,
		SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
			Priority:                   existingRule.Priority,
			Protocol:                   existingRule.Protocol,
			SourcePortRange:            existingRule.SourcePortRange,
			SourcePortRanges:           existingRule.SourcePortRanges,
			DestinationPortRange:       existingRule.DestinationPortRange,
			DestinationPortRanges:      existingRule.DestinationPortRanges,
			SourceAddressPrefix:        existingRule.SourceAddressPrefix,
			SourceAddressPrefixes:      existingRule.SourceAddressPrefixes,
			DestinationAddressPrefixes: destinations,
			Access:                     existingRule.Access,
			Direction:                  existingRule.Direction,
		},
	}
}

// collectionOrSingle returns the plural collection when non-empty, otherwise
// wraps the single value (or nothing) into a fresh slice.
func collectionOrSingle(collection *[]string, s *string) *[]string {
	if collection != nil && len(*collection) > 0 {
		return collection
	}
	if s == nil {
		return &[]string{}
	}
	return &[]string{*s}
}

// appendElements concatenates the optional collection, optional single string
// and optional extra strings into a brand-new slice.
func appendElements(collection *[]string, appendString *string, appendStrings *[]string) *[]string {
	merged := []string{}
	if collection != nil {
		merged = append(merged, *collection...)
	}
	if appendString != nil {
		merged = append(merged, *appendString)
	}
	if appendStrings != nil {
		merged = append(merged, *appendStrings...)
	}
	return &merged
}

// deduplicate drops exact-match repeats while preserving first-seen order.
func deduplicate(collection *[]string) *[]string {
	if collection == nil {
		return nil
	}
	seen := map[string]bool{}
	unique := make([]string, 0, len(*collection))
	for _, v := range *collection {
		if !seen[v] {
			seen[v] = true
			unique = append(unique, v)
		}
	}
	return &unique
}

// Determine if we should release existing owned public IPs
func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lbShouldExist, lbIsInternal bool, desiredPipName, svcName string, ipTagRequest serviceIPTagRequest) bool {
	// Latch some variables for readability purposes.
	pipName := *(*existingPip).Name

	// Assume the current IP Tags are empty by default unless properties specify otherwise.
	currentIPTags := &[]network.IPTag{}
	pipPropertiesFormat := (*existingPip).PublicIPAddressPropertiesFormat
	if pipPropertiesFormat != nil {
		currentIPTags = (*pipPropertiesFormat).IPTags
	}

	// Check whether the public IP is being referenced by other service.
	// The owned public IP can be released only when there is not other service using it.
// (tail of shouldReleaseExistingOwnedPublicIP, begun in the previous segment)
if existingPip.Tags[serviceTagKey] != nil {
	// case 1: there is at least one reference when deleting the PIP
	if !lbShouldExist && len(parsePIPServiceTag(existingPip.Tags[serviceTagKey])) > 0 {
		return false
	}

	// case 2: there is at least one reference from other service
	if lbShouldExist && len(parsePIPServiceTag(existingPip.Tags[serviceTagKey])) > 1 {
		return false
	}
}

// Release the ip under the following criteria -
// #1 - If we don't actually want a load balancer,
return !lbShouldExist ||
	// #2 - If the load balancer is internal, and thus doesn't require public exposure
	lbIsInternal ||
	// #3 - If the name of this public ip does not match the desired name,
	(pipName != desiredPipName) ||
	// #4 If the service annotations have specified the ip tags that the public ip must have, but they do not match the ip tags of the existing instance
	(ipTagRequest.IPTagsRequestedByAnnotation && !areIPTagsEquivalent(currentIPTags, ipTagRequest.IPTags))
}

// ensurePIPTagged ensures the public IP of the service is tagged as configured.
// It merges the cloud-config tags with any tags from the
// ServiceAnnotationAzurePIPTags annotation (annotation wins), keeps the
// cluster-name and service-name bookkeeping tags already present on the PIP,
// and reports whether any tag actually changed.
func (az *Cloud) ensurePIPTagged(service *v1.Service, pip *network.PublicIPAddress) bool {
	changed := false
	configTags := parseTags(az.Tags)
	annotationTags := make(map[string]*string)
	if _, ok := service.Annotations[ServiceAnnotationAzurePIPTags]; ok {
		annotationTags = parseTags(service.Annotations[ServiceAnnotationAzurePIPTags])
	}
	for k, v := range annotationTags {
		configTags[k] = v
	}

	// include the cluster name and service names tags when comparing
	var clusterName, serviceNames *string
	if v, ok := pip.Tags[clusterNameKey]; ok {
		clusterName = v
	}
	if v, ok := pip.Tags[serviceTagKey]; ok {
		serviceNames = v
	}
	if clusterName != nil {
		configTags[clusterNameKey] = clusterName
	}
	if serviceNames != nil {
		configTags[serviceTagKey] = serviceNames
	}

	// BUGFIX: assigning into a nil map panics, and a PIP fetched from Azure may
	// carry no tags at all. Allocate before the merge below (reads from a nil
	// map above are safe).
	if pip.Tags == nil {
		pip.Tags = make(map[string]*string)
	}

	for k, v := range configTags {
		if vv, ok := pip.Tags[k]; !ok || !strings.EqualFold(to.String(v), to.String(vv)) {
			pip.Tags[k] = v
			changed = true
		}
	}

	return changed
}

// This reconciles the PublicIP resources similar to how the LB is reconciled.
// It unbinds/updates/deletes service-owned PIPs as needed and, when an
// external LB is wanted, ensures the desired PIP exists and returns it.
func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbName string, wantLb bool) (*network.PublicIPAddress, error) {
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	serviceIPTagRequest := getServiceIPTagRequestForPublicIP(service)

	var (
		lb               *network.LoadBalancer
		desiredPipName   string
		err              error
		shouldPIPExisted bool
	)

	if !isInternal && wantLb {
		desiredPipName, shouldPIPExisted, err = az.determinePublicIPName(clusterName, service)
		if err != nil {
			return nil, err
		}
	}

	if lbName != "" {
		loadBalancer, _, err := az.getAzureLoadBalancer(lbName, azcache.CacheReadTypeDefault)
		if err != nil {
			return nil, err
		}
		lb = &loadBalancer
	}

	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)

	pips, err := az.ListPIP(service, pipResourceGroup)
	if err != nil {
		return nil, err
	}

	var (
		serviceAnnotationRequestsNamedPublicIP = shouldPIPExisted
		discoveredDesiredPublicIP              bool
		deletedDesiredPublicIP                 bool
		pipsToBeDeleted                        []*network.PublicIPAddress
		pipsToBeUpdated                        []*network.PublicIPAddress
	)

	for i := range pips {
		pip := pips[i]
		pipName := *pip.Name

		// If we've been told to use a specific public ip by the client, let's track whether or not it actually existed
		// when we inspect the set in Azure.
		discoveredDesiredPublicIP = discoveredDesiredPublicIP || wantLb && !isInternal && pipName == desiredPipName

		// Now, let's perform additional analysis to determine if we should release the public ips we have found.
		// We can only let them go if (a) they are owned by this service and (b) they meet the criteria for deletion.
		if serviceOwnsPublicIP(&pip, clusterName, serviceName) {
			var dirtyPIP, toBeDeleted bool
			if !wantLb {
				klog.V(2).Infof("reconcilePublicIP for service(%s): unbinding the service from pip %s", serviceName, *pip.Name)
				err = unbindServiceFromPIP(&pip, serviceName)
				if err != nil {
					return nil, err
				}
				dirtyPIP = true
			}
			changed := az.ensurePIPTagged(service, &pip)
			if changed {
				dirtyPIP = true
			}
			if shouldReleaseExistingOwnedPublicIP(&pip, wantLb, isInternal, desiredPipName, serviceName, serviceIPTagRequest) {
				// Then, release the public ip
				pipsToBeDeleted = append(pipsToBeDeleted, &pip)

				// Flag if we deleted the desired public ip
				deletedDesiredPublicIP = deletedDesiredPublicIP || pipName == desiredPipName

				// An aside: It would be unusual, but possible, for us to delete a public ip referred to explicitly by name
				// in Service annotations (which is usually reserved for non-service-owned externals), if that IP is tagged as
				// having been owned by a particular Kubernetes cluster.

				// If the pip is going to be deleted, we do not need to update it
				toBeDeleted = true
			}

			// Update tags of PIP only instead of deleting it.
			if !toBeDeleted && dirtyPIP {
				pipsToBeUpdated = append(pipsToBeUpdated, &pip)
			}
		}
	}

	if !isInternal && serviceAnnotationRequestsNamedPublicIP && !discoveredDesiredPublicIP && wantLb {
		return nil, fmt.Errorf("reconcilePublicIP for service(%s): pip(%s) not found", serviceName, desiredPipName)
	}

	var deleteFuncs, updateFuncs []func() error
	for _, pip := range pipsToBeUpdated {
		pipCopy := *pip
		updateFuncs = append(updateFuncs, func() error {
			// BUGFIX: log pipCopy, not the loop variable — these closures run
			// after the loop finishes, so *pip.Name would always name the last
			// PIP in the slice.
			klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - updating", serviceName, *pipCopy.Name)
			return az.CreateOrUpdatePIP(service, pipResourceGroup, pipCopy)
		})
	}
	errs := utilerrors.AggregateGoroutines(updateFuncs...)
	if errs != nil {
		return nil, utilerrors.Flatten(errs)
	}

	for _, pip := range pipsToBeDeleted {
		pipCopy := *pip
		deleteFuncs = append(deleteFuncs, func() error {
			// BUGFIX: same loop-variable capture issue as the update closures.
			klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, *pipCopy.Name)
			return az.safeDeletePublicIP(service, pipResourceGroup, &pipCopy, lb)
		})
	}
	errs = utilerrors.AggregateGoroutines(deleteFuncs...)
	if errs != nil {
		return nil, utilerrors.Flatten(errs)
	}

	if !isInternal && wantLb {
		// Confirm desired public ip resource exists
		var pip *network.PublicIPAddress
		domainNameLabel, found := getPublicIPDomainNameLabel(service)
		errorIfPublicIPDoesNotExist := serviceAnnotationRequestsNamedPublicIP && discoveredDesiredPublicIP && !deletedDesiredPublicIP
		if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel, clusterName, errorIfPublicIPDoesNotExist, found); err != nil {
			return nil, err
		}
		return pip, nil
	}
	return nil, nil
}

// safeDeletePublicIP deletes public IP by removing its reference first.
func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string, pip *network.PublicIPAddress, lb *network.LoadBalancer) error {
	// Remove references if pip.IPConfiguration is not nil.
	if pip.PublicIPAddressPropertiesFormat != nil &&
		pip.PublicIPAddressPropertiesFormat.IPConfiguration != nil &&
		lb != nil && lb.LoadBalancerPropertiesFormat != nil &&
		lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations != nil {
		referencedLBRules := []network.SubResource{}
		frontendIPConfigUpdated := false
		loadBalancerRuleUpdated := false

		// Check whether there are still frontend IP configurations referring to it.
ipConfigurationID := to.String(pip.PublicIPAddressPropertiesFormat.IPConfiguration.ID) if ipConfigurationID != "" { lbFrontendIPConfigs := *lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations for i := len(lbFrontendIPConfigs) - 1; i >= 0; i-- { config := lbFrontendIPConfigs[i] if strings.EqualFold(ipConfigurationID, to.String(config.ID)) { if config.FrontendIPConfigurationPropertiesFormat != nil && config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules != nil { referencedLBRules = *config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules } frontendIPConfigUpdated = true lbFrontendIPConfigs = append(lbFrontendIPConfigs[:i], lbFrontendIPConfigs[i+1:]...) break } } if frontendIPConfigUpdated { lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations = &lbFrontendIPConfigs } } // Check whether there are still load balancer rules referring to it. if len(referencedLBRules) > 0 { referencedLBRuleIDs := sets.NewString() for _, refer := range referencedLBRules { referencedLBRuleIDs.Insert(to.String(refer.ID)) } if lb.LoadBalancerPropertiesFormat.LoadBalancingRules != nil { lbRules := *lb.LoadBalancerPropertiesFormat.LoadBalancingRules for i := len(lbRules) - 1; i >= 0; i-- { ruleID := to.String(lbRules[i].ID) if ruleID != "" && referencedLBRuleIDs.Has(ruleID) { loadBalancerRuleUpdated = true lbRules = append(lbRules[:i], lbRules[i+1:]...) } } if loadBalancerRuleUpdated { lb.LoadBalancerPropertiesFormat.LoadBalancingRules = &lbRules } } } // Update load balancer when frontendIPConfigUpdated or loadBalancerRuleUpdated. 
if frontendIPConfigUpdated || loadBalancerRuleUpdated { err := az.CreateOrUpdateLB(service, *lb) if err != nil { klog.Errorf("safeDeletePublicIP for service(%s) failed with error: %v", getServiceName(service), err) return err } } } pipName := to.String(pip.Name) klog.V(10).Infof("DeletePublicIP(%s, %q): start", pipResourceGroup, pipName) err := az.DeletePublicIP(service, pipResourceGroup, pipName) if err != nil { return err } klog.V(10).Infof("DeletePublicIP(%s, %q): end", pipResourceGroup, pipName) return nil } func findProbe(probes []network.Probe, probe network.Probe) bool { for _, existingProbe := range probes { if strings.EqualFold(to.String(existingProbe.Name), to.String(probe.Name)) && to.Int32(existingProbe.Port) == to.Int32(probe.Port) { return true } } return false } func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule, wantLB bool) bool { for _, existingRule := range rules { if strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) && equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat, wantLB) { return true } } return false } // equalLoadBalancingRulePropertiesFormat checks whether the provided LoadBalancingRulePropertiesFormat are equal. // Note: only fields used in reconcileLoadBalancer are considered. 
func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRulePropertiesFormat, t *network.LoadBalancingRulePropertiesFormat, wantLB bool) bool { if s == nil || t == nil { return false } properties := reflect.DeepEqual(s.Protocol, t.Protocol) && reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) && reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) && reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) && reflect.DeepEqual(s.FrontendPort, t.FrontendPort) && reflect.DeepEqual(s.BackendPort, t.BackendPort) && reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP) && reflect.DeepEqual(to.Bool(s.EnableTCPReset), to.Bool(t.EnableTCPReset)) && reflect.DeepEqual(to.Bool(s.DisableOutboundSnat), to.Bool(t.DisableOutboundSnat)) if wantLB && s.IdleTimeoutInMinutes != nil && t.IdleTimeoutInMinutes != nil { return properties && reflect.DeepEqual(s.IdleTimeoutInMinutes, t.IdleTimeoutInMinutes) } return properties } // This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction. // Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined. // We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal, // despite different DestinationAddressPrefixes, in order to give it a chance to consolidate the two rules. 
func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) bool { for _, existingRule := range rules { if !strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) { continue } if existingRule.Protocol != rule.Protocol { continue } if !strings.EqualFold(to.String(existingRule.SourcePortRange), to.String(rule.SourcePortRange)) { continue } if !strings.EqualFold(to.String(existingRule.DestinationPortRange), to.String(rule.DestinationPortRange)) { continue } if !strings.EqualFold(to.String(existingRule.SourceAddressPrefix), to.String(rule.SourceAddressPrefix)) { continue } if !allowsConsolidation(existingRule) && !allowsConsolidation(rule) { if !strings.EqualFold(to.String(existingRule.DestinationAddressPrefix), to.String(rule.DestinationAddressPrefix)) { continue } } if existingRule.Access != rule.Access { continue } if existingRule.Direction != rule.Direction { continue } return true } return false } func (az *Cloud) getPublicIPAddressResourceGroup(service *v1.Service) string { if resourceGroup, found := service.Annotations[ServiceAnnotationLoadBalancerResourceGroup]; found { resourceGroupName := strings.TrimSpace(resourceGroup) if len(resourceGroupName) > 0 { return resourceGroupName } } return az.ResourceGroup } func (az *Cloud) isBackendPoolPreConfigured(service *v1.Service) bool { preConfigured := false isInternal := requiresInternalLoadBalancer(service) if az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesAll { preConfigured = true } if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesInternal) && isInternal { preConfigured = true } if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesExternal) && !isInternal { preConfigured = true } return preConfigured } // Check if service requires an internal load balancer. 
func requiresInternalLoadBalancer(service *v1.Service) bool { if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternal]; found { return l == "true" } return false } func subnet(service *v1.Service) *string { if requiresInternalLoadBalancer(service) { if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; found && strings.TrimSpace(l) != "" { return &l } } return nil } // getServiceLoadBalancerMode parses the mode value. // if the value is __auto__ it returns isAuto = TRUE. // if anything else it returns the unique VM set names after trimming spaces. func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, vmSetNames []string) { mode, hasMode := service.Annotations[ServiceAnnotationLoadBalancerMode] mode = strings.TrimSpace(mode) isAuto = strings.EqualFold(mode, ServiceAnnotationLoadBalancerAutoModeValue) if !isAuto { // Break up list of "AS1,AS2" vmSetParsedList := strings.Split(mode, ",") // Trim the VM set names and remove duplicates // e.g. 
{"AS1"," AS2", "AS3", "AS3"} => {"AS1", "AS2", "AS3"} vmSetNameSet := sets.NewString() for _, v := range vmSetParsedList { vmSetNameSet.Insert(strings.TrimSpace(v)) } vmSetNames = vmSetNameSet.List() } return hasMode, isAuto, vmSetNames } func useSharedSecurityRule(service *v1.Service) bool { if l, ok := service.Annotations[ServiceAnnotationSharedSecurityRule]; ok { return l == "true" } return false } func getServiceTags(service *v1.Service) []string { if service == nil { return nil } if serviceTags, found := service.Annotations[ServiceAnnotationAllowedServiceTag]; found { result := []string{} tags := strings.Split(strings.TrimSpace(serviceTags), ",") for _, tag := range tags { serviceTag := strings.TrimSpace(tag) if serviceTag != "" { result = append(result, serviceTag) } } return result } return nil } func serviceOwnsPublicIP(pip *network.PublicIPAddress, clusterName, serviceName string) bool { if pip != nil && pip.Tags != nil { serviceTag := pip.Tags[serviceTagKey] clusterTag := pip.Tags[clusterNameKey] if serviceTag != nil && isSVCNameInPIPTag(*serviceTag, serviceName) { // Backward compatible for clusters upgraded from old releases. // In such case, only "service" tag is set. if clusterTag == nil { return true } // If cluster name tag is set, then return true if it matches. 
if *clusterTag == clusterName { return true } } } return false } func isSVCNameInPIPTag(tag, svcName string) bool { svcNames := parsePIPServiceTag(&tag) for _, name := range svcNames { if strings.EqualFold(name, svcName) { return true } } return false } func parsePIPServiceTag(serviceTag *string) []string { if serviceTag == nil { return []string{} } serviceNames := strings.FieldsFunc(*serviceTag, func(r rune) bool { return r == ',' }) for i, name := range serviceNames { serviceNames[i] = strings.TrimSpace(name) } return serviceNames } // bindServicesToPIP add the incoming service name to the PIP's tag // parameters: public IP address to be updated and incoming service names // return values: // 1. a bool flag to indicate if there is a new service added // 2. an error when the pip is nil // example: // "ns1/svc1" + ["ns1/svc1", "ns2/svc2"] = "ns1/svc1,ns2/svc2" func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []string, replace bool) (bool, error) { if pip == nil { return false, fmt.Errorf("nil public IP") } if pip.Tags == nil { pip.Tags = map[string]*string{serviceTagKey: to.StringPtr("")} } serviceTagValue := pip.Tags[serviceTagKey] serviceTagValueSet := make(map[string]struct{}) existingServiceNames := parsePIPServiceTag(serviceTagValue) addedNew := false // replace is used when unbinding the service from PIP so addedNew remains false all the time if replace { serviceTagValue = to.StringPtr(strings.Join(incomingServiceNames, ",")) pip.Tags[serviceTagKey] = serviceTagValue return false, nil } for _, name := range existingServiceNames { if _, ok := serviceTagValueSet[name]; !ok { serviceTagValueSet[name] = struct{}{} } } for _, serviceName := range incomingServiceNames { if serviceTagValue == nil || *serviceTagValue == "" { serviceTagValue = to.StringPtr(serviceName) addedNew = true } else { // detect duplicates if _, ok := serviceTagValueSet[serviceName]; !ok { *serviceTagValue += fmt.Sprintf(",%s", serviceName) addedNew = true } else { 
klog.V(10).Infof("service %s has been bound to the pip already", serviceName) } } } pip.Tags[serviceTagKey] = serviceTagValue return addedNew, nil } func unbindServiceFromPIP(pip *network.PublicIPAddress, serviceName string) error { if pip == nil || pip.Tags == nil { return fmt.Errorf("nil public IP or tags") } serviceTagValue := pip.Tags[serviceTagKey] existingServiceNames := parsePIPServiceTag(serviceTagValue) var found bool for i := len(existingServiceNames) - 1; i >= 0; i-- { if strings.EqualFold(existingServiceNames[i], serviceName) { existingServiceNames = append(existingServiceNames[:i], existingServiceNames[i+1:]...) found = true } } if !found { klog.Warningf("cannot find the service %s in the corresponding PIP", serviceName) } _, err := bindServicesToPIP(pip, existingServiceNames, true) if err != nil { return err } if existingServiceName, ok := pip.Tags[serviceUsingDNSKey]; ok { if strings.EqualFold(*existingServiceName, serviceName) { pip.Tags[serviceUsingDNSKey] = to.StringPtr("") } } return nil } // ensureLoadBalancerTagged ensures every load balancer in the resource group is tagged as configured func (az *Cloud) ensureLoadBalancerTagged(lb *network.LoadBalancer) bool { changed := false if az.Tags == "" { return false } tags := parseTags(az.Tags) if lb.Tags == nil { lb.Tags = make(map[string]*string) } for k, v := range tags { if vv, ok := lb.Tags[k]; !ok || !strings.EqualFold(to.String(v), to.String(vv)) { lb.Tags[k] = v changed = true } } return changed } // ensureSecurityGroupTagged ensures the security group is tagged as configured func (az *Cloud) ensureSecurityGroupTagged(sg *network.SecurityGroup) bool { changed := false if az.Tags == "" { return false } tags := parseTags(az.Tags) if sg.Tags == nil { sg.Tags = make(map[string]*string) } for k, v := range tags { if vv, ok := sg.Tags[k]; !ok || !strings.EqualFold(to.String(v), to.String(vv)) { sg.Tags[k] = v changed = true } } return changed }
fgimenez/kubernetes
staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go
GO
apache-2.0
104,212
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.file.remote.sftp; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import org.apache.camel.BindToRegistry; import org.apache.camel.Exchange; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; import org.apache.camel.util.IOHelper; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.EnabledIf; @EnabledIf(value = "org.apache.camel.component.file.remote.services.SftpEmbeddedService#hasRequiredAlgorithms") public class SftpKeyConsumeTest extends SftpServerTestSupport { @Test public void testSftpSimpleConsume() throws Exception { String expected = "Hello World"; // create file using regular file template.sendBodyAndHeader("file://" + service.getFtpRootDir(), expected, Exchange.FILE_NAME, "hello.txt"); MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); mock.expectedHeaderReceived(Exchange.FILE_NAME, "hello.txt"); mock.expectedBodiesReceived(expected); context.getRouteController().startRoute("foo"); assertMockEndpointsSatisfied(); } private byte[] getBytesFromFile(String 
filename) throws IOException { InputStream input; input = new FileInputStream(new File(filename)); ByteArrayOutputStream output = new ByteArrayOutputStream(); IOHelper.copyAndCloseInput(input, output); return output.toByteArray(); } @BindToRegistry("privateKey") public byte[] addPrivateKey() throws Exception { return getBytesFromFile("./src/test/resources/id_rsa"); } @BindToRegistry("knownHosts") public byte[] addKnownHosts() throws Exception { return getBytesFromFile("./src/test/resources/id_rsa"); } @Override protected RouteBuilder createRouteBuilder() throws Exception { return new RouteBuilder() { @Override public void configure() throws Exception { from("sftp://localhost:{{ftp.server.port}}/" + service.getFtpRootDir() + "?username=admin&knownHosts=#knownHosts&privateKey=#privateKey&privateKeyPassphrase=secret&delay=10000&strictHostKeyChecking=yes&disconnect=true") .routeId("foo").noAutoStartup().to("mock:result"); } }; } }
pmoerenhout/camel
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/sftp/SftpKeyConsumeTest.java
Java
apache-2.0
3,273
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.tinkerpop.gremlin.console.groovy.plugin; import org.apache.tinkerpop.gremlin.TestHelper; import org.apache.tinkerpop.gremlin.driver.Result; import org.apache.tinkerpop.gremlin.server.Settings; import org.apache.tinkerpop.gremlin.util.iterator.IteratorUtils; import org.codehaus.groovy.tools.shell.Groovysh; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.hasSize; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; /** * @author Stephen Mallette (http://stephen.genoprime.com) */ public class DriverRemoteAcceptorIntegrateTest extends AbstractGremlinServerIntegrationTest { private final Groovysh groovysh = new Groovysh(); private DriverRemoteAcceptor acceptor; @Rule public TestName name = new TestName(); /** * Configure specific Gremlin Server settings for specific tests. 
*/ @Override public Settings overrideSettings(final Settings settings) { try { final String tinkerGraphConfig = TestHelper.generateTempFileFromResource(this.getClass(), "tinkergraph-empty.properties", ".tmp").getAbsolutePath(); settings.graphs.put("g", tinkerGraphConfig); return settings; } catch (Exception ex) { throw new RuntimeException(ex); } } @Before public void before() throws Exception { acceptor = new DriverRemoteAcceptor(groovysh); } @After public void after() { try { acceptor.close(); } catch (Exception ex) { ex.printStackTrace(); } } @Test public void shouldConnect() throws Exception { assertThat(acceptor.connect(Collections.singletonList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath())).toString(), startsWith("Connected - ")); } @Test public void shouldConnectAndSubmitSession() throws Exception { assertThat(acceptor.connect(Arrays.asList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath(), "session")).toString(), startsWith("Connected - ")); assertEquals("1", ((Iterator) acceptor.submit(Collections.singletonList("x = 1"))).next()); assertEquals("0", ((Iterator) acceptor.submit(Collections.singletonList("x - 1"))).next()); assertEquals("0", ((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).iterator().next().getString()); } @Test public void shouldConnectAndSubmitSimple() throws Exception { assertThat(acceptor.connect(Collections.singletonList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath())).toString(), startsWith("Connected - ")); assertEquals("2", ((Iterator) acceptor.submit(Collections.singletonList("1+1"))).next()); assertEquals("2", ((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).iterator().next().getString()); } @Test public void shouldConnectAndSubmitSimpleList() throws Exception { 
assertThat(acceptor.connect(Collections.singletonList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath())).toString(), startsWith("Connected - ")); assertThat(IteratorUtils.list(((Iterator<String>) acceptor.submit(Collections.singletonList("[1,2,3,4,5]")))), contains("1", "2", "3", "4", "5")); assertThat(((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).stream().map(Result::getString).collect(Collectors.toList()), contains("1", "2", "3", "4", "5")); } @Test public void shouldConnectAndReturnVertices() throws Exception { assertThat(acceptor.connect(Collections.singletonList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath())).toString(), startsWith("Connected - ")); assertThat(IteratorUtils.list(((Iterator<String>) acceptor.submit(Collections.singletonList("g.addVertex('name','stephen');g.addVertex('name','marko');g.traversal().V()")))), hasSize(2)); assertThat(((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).stream().map(Result::getString).collect(Collectors.toList()), hasSize(2)); } @Test public void shouldConnectAndReturnVerticesWithAnAlias() throws Exception { assertThat(acceptor.connect(Collections.singletonList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath())).toString(), startsWith("Connected - ")); acceptor.configure(Arrays.asList("alias", "x", "g")); assertThat(IteratorUtils.list(((Iterator<String>) acceptor.submit(Collections.singletonList("x.addVertex('name','stephen');x.addVertex('name','marko');x.traversal().V()")))), hasSize(2)); assertThat(((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).stream().map(Result::getString).collect(Collectors.toList()), hasSize(2)); } @Test public void shouldConnectAndSubmitForNull() throws Exception { 
assertThat(acceptor.connect(Collections.singletonList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath())).toString(), startsWith("Connected - ")); assertThat(IteratorUtils.list(((Iterator<String>) acceptor.submit(Collections.singletonList("g.traversal().V().drop().iterate();null")))), contains("null")); assertThat(((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).stream().map(Result::getObject).collect(Collectors.toList()), contains("null")); } @Test public void shouldConnectAndSubmitInSession() throws Exception { assertThat(acceptor.connect(Arrays.asList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath(), "session")).toString(), startsWith("Connected - ")); assertEquals("2", ((Iterator) acceptor.submit(Collections.singletonList("x=1+1"))).next()); assertEquals("2", ((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).iterator().next().getString()); assertEquals("4", ((Iterator) acceptor.submit(Collections.singletonList("x+2"))).next()); assertEquals("4", ((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).iterator().next().getString()); } @Test public void shouldConnectAndSubmitInNamedSession() throws Exception { assertThat(acceptor.connect(Arrays.asList(TestHelper.generateTempFileFromResource(this.getClass(), "remote.yaml", ".tmp").getAbsolutePath(), "session", "AAA")).toString(), startsWith("Connected - ")); assertEquals("2", ((Iterator) acceptor.submit(Collections.singletonList("x=1+1"))).next()); assertEquals("2", ((List<Result>) groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).iterator().next().getString()); assertEquals("4", ((Iterator) acceptor.submit(Collections.singletonList("x+2"))).next()); assertEquals("4", ((List<Result>) 
groovysh.getInterp().getContext().getProperty(DriverRemoteAcceptor.RESULT)).iterator().next().getString()); } }
RussellSpitzer/incubator-tinkerpop
gremlin-console/src/test/java/org/apache/tinkerpop/gremlin/console/groovy/plugin/DriverRemoteAcceptorIntegrateTest.java
Java
apache-2.0
8,489
package com.cloudhopper.commons.sql.c3p0; /* * #%L * ch-commons-sql * %% * Copyright (C) 2012 - 2013 Cloudhopper by Twitter * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import com.cloudhopper.commons.sql.*; import com.cloudhopper.commons.sql.adapter.*; import com.mchange.v2.c3p0.ComboPooledDataSource; import java.beans.PropertyVetoException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Adapter for a c3p0 DataSource. * * @author joelauer */ public class C3P0DataSourceAdapter implements DataSourceAdapter { private static Logger logger = LoggerFactory.getLogger(C3P0DataSourceAdapter.class); public boolean isPooled() { return true; } public boolean isJmxCapable() { return true; } public ManagedDataSource create(DataSourceConfiguration config) throws SQLMissingDependencyException, SQLConfigurationException { // // http://www.mchange.com/projects/c3p0/index.html#configuration_properties // // // these system properties need turned off prior to creating our first // instance of the ComboPooledDataSource, otherwise, they are ignored // turn off VMID stuff (causes long ugly names for datasources) System.setProperty("com.mchange.v2.c3p0.VMID", "NONE"); // jmx is off by default if (!config.getJmx()) { // apparently, c3p0 does this with a system-wide property // com.mchange.v2.c3p0.management.ManagementCoordinator=com.mchange.v2.c3p0.management.NullManagementCoordinator System.setProperty("com.mchange.v2.c3p0.management.ManagementCoordinator", 
"com.mchange.v2.c3p0.management.NullManagementCoordinator"); } else { System.setProperty("com.mchange.v2.c3p0.management.ManagementCoordinator", "com.cloudhopper.commons.sql.c3p0.C3P0CustomManagementCoordinator"); } // set the JMX domain for the C3P0 C3P0CustomManagementCoordinator.setJmxDomainOnce(config.getJmxDomain()); // create a new instance of the c3p0 datasource ComboPooledDataSource cpds = new ComboPooledDataSource(true); // set properties try { // set required properties cpds.setDriverClass(config.getDriver()); cpds.setUser(config.getUsername()); cpds.setPassword(config.getPassword()); cpds.setJdbcUrl(config.getUrl()); // set optional properties cpds.setDataSourceName(config.getName()); cpds.setMinPoolSize(config.getMinPoolSize()); cpds.setMaxPoolSize(config.getMaxPoolSize()); // we'll set the initial pool size to the minimum size cpds.setInitialPoolSize(config.getMinPoolSize()); // set the validation query cpds.setPreferredTestQuery(config.getValidationQuery()); // amount of time (in ms) to wait for getConnection() to succeed cpds.setCheckoutTimeout((int)config.getCheckoutTimeout()); // checkin validation cpds.setTestConnectionOnCheckin(config.getValidateOnCheckin()); // checkout validation cpds.setTestConnectionOnCheckout(config.getValidateOnCheckout()); // amount of time to wait to validate connections // NOTE: in seconds int seconds = (int)(config.getValidateIdleConnectionTimeout()/1000); cpds.setIdleConnectionTestPeriod(seconds); // set idleConnectionTimeout // NOTE: in seconds seconds = (int)(config.getIdleConnectionTimeout()/1000); cpds.setMaxIdleTimeExcessConnections(seconds); // set activeConnectionTimeout seconds = (int)(config.getActiveConnectionTimeout()/1000); cpds.setUnreturnedConnectionTimeout(seconds); if (config.getDebug()) { cpds.setDebugUnreturnedConnectionStackTraces(true); } else { cpds.setDebugUnreturnedConnectionStackTraces(false); } // properties I think aren't valid for c3p0 // defines how many times c3p0 will try to acquire a new 
Connection from the database before giving up. cpds.setAcquireRetryAttempts(10); } catch (PropertyVetoException e) { throw new SQLConfigurationException("Property was vetoed during configuration", e); } /** // configure c3p0 defaults that seem to make more sense /** * c3p0.acquireIncrement hibernate.c3p0.acquire_increment c3p0.idleConnectionTestPeriod hibernate.c3p0.idle_test_period c3p0.initialPoolSize not available -- uses minimum size c3p0.maxIdleTime hibernate.c3p0.timeout c3p0.maxPoolSize hibernate.c3p0.max_size c3p0.maxStatements hibernate.c3p0.max_statements c3p0.minPoolSize hibernate.c3p0.min_size c3p0.testConnectionsOnCheckout hibernate.c3p0.validate hibernate 2.x only! */ return new C3P0ManagedDataSource(this, config, cpds); } }
twitter/cloudhopper-commons
ch-commons-sql/src/main/java/com/cloudhopper/commons/sql/c3p0/C3P0DataSourceAdapter.java
Java
apache-2.0
5,661
// Copyright 2011-2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.security.zynamics.reil.algorithms.mono2.common.interfaces; public interface ILatticeElement<LatticeElementType> { /** * Function to allow the lattice element as soon as the solver discovers a native instruction exit * edge. */ public void onInstructionExit(); // In the future we might add: onBasicBlockExit() and onFunctionExit() public LatticeElementType copy(); }
google/binnavi
src/main/java/com/google/security/zynamics/reil/algorithms/mono2/common/interfaces/ILatticeElement.java
Java
apache-2.0
995
'use strict'

// Establishes the application's MongoDB connection through Mongoose.
const mongoose = require('mongoose')

// Load HOST_NAME / DATABASE_NAME from the .env file before they are read.
require('dotenv').config()

const connectionUri = `mongodb://${process.env.HOST_NAME}/${process.env.DATABASE_NAME}`

// Use the native Promise implementation for Mongoose operations.
mongoose.Promise = global.Promise
mongoose.connect(connectionUri)

const connection = mongoose.connection
connection.on('error', console.error.bind(console, 'MongoDB Connection error'))
adamsaparudin/spaced-repetition
server/db.js
JavaScript
apache-2.0
322
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.oodt.pcs.health; /** * * Met keys for the {@link WorkflowStatesFile} * * @author mattmann * @version $Revision$ */ public interface WorkflowStatesMetKeys { String WORKFLOW_STATES_GROUP = "WorkflowStatesGroup"; String WORKFLOW_STATES_VECTOR = "States"; }
IMS94/oodt
pcs/core/src/main/java/org/apache/oodt/pcs/health/WorkflowStatesMetKeys.java
Java
apache-2.0
1,093
/*
 * Copyright (C) 2015 AppTik Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.apptik.widget.captcha.builders;

import io.apptik.widget.captcha.fragments.PointItCaptcha;

/**
 * {@link CaptchaFragmentBuilder} that supplies a {@link PointItCaptcha} fragment.
 */
public class PointItBuilder extends CaptchaFragmentBuilder {

    /** Creates the builder, assigning a new {@link PointItCaptcha} to the inherited fragment field. */
    public PointItBuilder() {
        fragment = new PointItCaptcha();
    }
}
djodjoni/aCaptcha
lib/src/main/java/io/apptik/widget/captcha/builders/PointItBuilder.java
Java
apache-2.0
851
/*
 * Copyright 2014 - 2015 Real Logic Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package uk.co.real_logic.aeron.driver;

import uk.co.real_logic.aeron.protocol.DataHeaderFlyweight;
import uk.co.real_logic.agrona.concurrent.UnsafeBuffer;

import java.net.InetSocketAddress;

/**
 * Callback interface for handling Data Frames received from the network.
 */
@FunctionalInterface
public interface DataPacketHandler
{
    /**
     * Handle a Data Frame from the network.
     *
     * @param header of the first Data Frame in the packet (may be re-wrapped if needed)
     * @param buffer holding the data (always starts at 0 offset)
     * @param length of the packet (may be longer than the header frame length)
     * @param srcAddress of the packet
     * @return the number of bytes received.
     */
    int onDataPacket(DataHeaderFlyweight header, UnsafeBuffer buffer, int length, InetSocketAddress srcAddress);
}
rlankenau/Aeron
aeron-driver/src/main/java/uk/co/real_logic/aeron/driver/DataPacketHandler.java
Java
apache-2.0
1,355
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { Button, Intent } from '@blueprintjs/core';
import { IconNames } from '@blueprintjs/icons';
import axios from 'axios';
import React from 'react';
import ReactTable from 'react-table';

import { ActionCell, RefreshButton, TableColumnSelector, ViewControlBar } from '../../components';
import { AsyncActionDialog, LookupEditDialog } from '../../dialogs/';
import { AppToaster } from '../../singletons/toaster';
import { getDruidErrorMessage, LocalStorageKeys, QueryManager } from '../../utils';
import { BasicAction } from '../../utils/basic-action';
import { LocalStorageBackedArray } from '../../utils/local-storage-backed-array';

import './lookups-view.scss';

// Labels of every column the table can show; consumed by the column selector.
const tableColumns: string[] = ['Lookup name', 'Tier', 'Type', 'Version', ActionCell.COLUMN_LABEL];

// Tier used when the coordinator reports no configured tiers.
const DEFAULT_LOOKUP_TIER: string = '__default';

export interface LookupsViewProps {}

export interface LookupsViewState {
  // Loaded lookup rows; undefined while loading or after an error.
  lookups?: any[];
  loadingLookups: boolean;
  lookupsError?: string;
  // True when the coordinator responds 404, i.e. lookups were never initialized.
  lookupsUninitialized: boolean;

  // State backing the edit/create dialog.
  lookupEditDialogOpen: boolean;
  lookupEditName: string;
  lookupEditTier: string;
  lookupEditVersion: string;
  lookupEditSpec: string;
  isEdit: boolean;
  allLookupTiers: string[];

  // State backing the delete-confirmation dialog; both unset when no delete is pending.
  deleteLookupName?: string;
  deleteLookupTier?: string;

  // Column labels the user has hidden, persisted in local storage.
  hiddenColumns: LocalStorageBackedArray<string>;
}

/**
 * View that lists Druid lookups and lets the user add, edit and delete them
 * through the coordinator's lookup-config HTTP API.
 */
export class LookupsView extends React.PureComponent<LookupsViewProps, LookupsViewState> {
  private lookupsQueryManager: QueryManager<null, { lookupEntries: any[]; tiers: string[] }>;

  constructor(props: LookupsViewProps, context: any) {
    super(props, context);
    this.state = {
      lookups: [],
      loadingLookups: true,
      lookupsUninitialized: false,
      lookupEditDialogOpen: false,
      lookupEditTier: '',
      lookupEditName: '',
      lookupEditVersion: '',
      lookupEditSpec: '',
      isEdit: false,
      allLookupTiers: [],
      hiddenColumns: new LocalStorageBackedArray<string>(
        LocalStorageKeys.LOOKUP_TABLE_COLUMN_SELECTION,
      ),
    };

    this.lookupsQueryManager = new QueryManager({
      processQuery: async () => {
        // Discover the configured tiers, falling back to the default tier when none exist.
        const tiersResp = await axios.get('/druid/coordinator/v1/lookups/config?discover=true');
        const tiers =
          tiersResp.data && tiersResp.data.length > 0 ? tiersResp.data : [DEFAULT_LOOKUP_TIER];

        // Flatten the tier -> id -> spec mapping into one row per lookup.
        const lookupEntries: {}[] = [];
        const lookupResp = await axios.get('/druid/coordinator/v1/lookups/config/all');
        const lookupData = lookupResp.data;
        Object.keys(lookupData).map((tier: string) => {
          const lookupIds = lookupData[tier];
          Object.keys(lookupIds).map((id: string) => {
            lookupEntries.push({
              tier,
              id,
              version: lookupIds[id].version,
              spec: lookupIds[id].lookupExtractorFactory,
            });
          });
        });

        return {
          lookupEntries,
          tiers,
        };
      },
      onStateChange: ({ result, loading, error }) => {
        this.setState({
          lookups: result ? result.lookupEntries : undefined,
          loadingLookups: loading,
          lookupsError: error,
          // A 404 means lookup config was never initialized on this cluster.
          lookupsUninitialized: error === 'Request failed with status code 404',
          allLookupTiers: result ? result.tiers : [],
        });
      },
    });
  }

  componentDidMount(): void {
    this.lookupsQueryManager.runQuery(null);
  }

  componentWillUnmount(): void {
    this.lookupsQueryManager.terminate();
  }

  // POSTs an empty config to initialize lookups on a fresh cluster, then reloads.
  private async initializeLookup() {
    try {
      await axios.post(`/druid/coordinator/v1/lookups/config`, {});
      this.lookupsQueryManager.rerunLastQuery();
    } catch (e) {
      AppToaster.show({
        icon: IconNames.ERROR,
        intent: Intent.DANGER,
        message: getDruidErrorMessage(e),
      });
    }
  }

  // Opens the edit dialog; an empty id means "create a new lookup".
  private async openLookupEditDialog(tier: string, id: string) {
    const { lookups } = this.state;
    if (!lookups) return;

    const target: any = lookups.find((lookupEntry: any) => {
      return lookupEntry.tier === tier && lookupEntry.id === id;
    });
    if (id === '') {
      // Create mode: blank spec, first known tier, fresh ISO timestamp as the version.
      this.setState(prevState => ({
        lookupEditName: '',
        lookupEditTier: prevState.allLookupTiers[0],
        lookupEditDialogOpen: true,
        lookupEditSpec: '',
        lookupEditVersion: new Date().toISOString(),
        isEdit: false,
      }));
    } else {
      // Edit mode: pre-populate the dialog from the selected entry.
      this.setState({
        lookupEditName: id,
        lookupEditTier: tier,
        lookupEditDialogOpen: true,
        lookupEditSpec: JSON.stringify(target.spec, null, 2),
        lookupEditVersion: target.version,
        isEdit: true,
      });
    }
  }

  // Generic field setter wired to the edit dialog's onChange.
  private handleChangeLookup = (field: string, value: string) => {
    this.setState({
      [field]: value,
    } as any);
  };

  // Submits the edit dialog: an edit POSTs to the lookup's own endpoint, a
  // create POSTs a tier-keyed payload to the base endpoint; then reloads.
  private async submitLookupEdit() {
    const {
      lookupEditTier,
      lookupEditName,
      lookupEditSpec,
      lookupEditVersion,
      isEdit,
    } = this.state;
    let endpoint = '/druid/coordinator/v1/lookups/config';
    const specJson: any = JSON.parse(lookupEditSpec);
    let dataJson: any;
    if (isEdit) {
      endpoint = `${endpoint}/${lookupEditTier}/${lookupEditName}`;
      dataJson = {
        version: lookupEditVersion,
        lookupExtractorFactory: specJson,
      };
    } else {
      dataJson = {
        [lookupEditTier]: {
          [lookupEditName]: {
            version: lookupEditVersion,
            lookupExtractorFactory: specJson,
          },
        },
      };
    }
    try {
      await axios.post(endpoint, dataJson);
      this.setState({
        lookupEditDialogOpen: false,
      });
      this.lookupsQueryManager.rerunLastQuery();
    } catch (e) {
      AppToaster.show({
        icon: IconNames.ERROR,
        intent: Intent.DANGER,
        message: getDruidErrorMessage(e),
      });
    }
  }

  // Row actions: edit opens the dialog, delete arms the confirmation dialog.
  private getLookupActions(lookupTier: string, lookupId: string): BasicAction[] {
    return [
      {
        icon: IconNames.EDIT,
        title: 'Edit',
        onAction: () => this.openLookupEditDialog(lookupTier, lookupId),
      },
      {
        icon: IconNames.CROSS,
        title: 'Delete',
        intent: Intent.DANGER,
        onAction: () => this.setState({ deleteLookupTier: lookupTier, deleteLookupName: lookupId }),
      },
    ];
  }

  // Confirmation dialog shown while a delete is pending; renders nothing otherwise.
  renderDeleteLookupAction() {
    const { deleteLookupTier, deleteLookupName } = this.state;
    if (!deleteLookupTier) return;

    return (
      <AsyncActionDialog
        action={async () => {
          await axios.delete(
            `/druid/coordinator/v1/lookups/config/${deleteLookupTier}/${deleteLookupName}`,
          );
        }}
        confirmButtonText="Delete lookup"
        successText="Lookup was deleted"
        failText="Could not delete lookup"
        intent={Intent.DANGER}
        onClose={() => {
          this.setState({ deleteLookupTier: undefined, deleteLookupName: undefined });
        }}
        onSuccess={() => {
          this.lookupsQueryManager.rerunLastQuery();
        }}
      >
        <p>{`Are you sure you want to delete the lookup '${deleteLookupName}'?`}</p>
      </AsyncActionDialog>
    );
  }

  renderLookupsTable() {
    const {
      lookups,
      loadingLookups,
      lookupsError,
      lookupsUninitialized,
      hiddenColumns,
    } = this.state;

    // On a fresh cluster, offer one-click initialization instead of the table.
    if (lookupsUninitialized) {
      return (
        <div className="init-div">
          <Button
            icon={IconNames.BUILD}
            text="Initialize lookups"
            onClick={() => this.initializeLookup()}
          />
        </div>
      );
    }

    return (
      <>
        <ReactTable
          data={lookups || []}
          loading={loadingLookups}
          noDataText={
            !loadingLookups && lookups && !lookups.length ? 'No lookups' : lookupsError || ''
          }
          filterable
          columns={[
            {
              Header: 'Lookup name',
              id: 'lookup_name',
              accessor: 'id',
              filterable: true,
              show: hiddenColumns.exists('Lookup name'),
            },
            {
              Header: 'Tier',
              id: 'tier',
              accessor: 'tier',
              filterable: true,
              show: hiddenColumns.exists('Tier'),
            },
            {
              Header: 'Type',
              id: 'type',
              accessor: 'spec.type',
              filterable: true,
              show: hiddenColumns.exists('Type'),
            },
            {
              Header: 'Version',
              id: 'version',
              accessor: 'version',
              filterable: true,
              show: hiddenColumns.exists('Version'),
            },
            {
              Header: ActionCell.COLUMN_LABEL,
              id: ActionCell.COLUMN_ID,
              width: ActionCell.COLUMN_WIDTH,
              accessor: (row: any) => ({ id: row.id, tier: row.tier }),
              filterable: false,
              Cell: (row: any) => {
                const lookupId = row.value.id;
                const lookupTier = row.value.tier;
                const lookupActions = this.getLookupActions(lookupTier, lookupId);
                return <ActionCell actions={lookupActions} />;
              },
              show: hiddenColumns.exists(ActionCell.COLUMN_LABEL),
            },
          ]}
          defaultPageSize={50}
        />
      </>
    );
  }

  renderLookupEditDialog() {
    const {
      lookupEditDialogOpen,
      allLookupTiers,
      lookupEditSpec,
      lookupEditTier,
      lookupEditName,
      lookupEditVersion,
      isEdit,
    } = this.state;
    if (!lookupEditDialogOpen) return;

    return (
      <LookupEditDialog
        onClose={() => this.setState({ lookupEditDialogOpen: false })}
        onSubmit={() => this.submitLookupEdit()}
        onChange={this.handleChangeLookup}
        lookupSpec={lookupEditSpec}
        lookupName={lookupEditName}
        lookupTier={lookupEditTier}
        lookupVersion={lookupEditVersion}
        isEdit={isEdit}
        allLookupTiers={allLookupTiers}
      />
    );
  }

  render(): JSX.Element {
    const { lookupsError, hiddenColumns } = this.state;

    return (
      <div className="lookups-view app-view">
        <ViewControlBar label="Lookups">
          <RefreshButton
            onRefresh={auto => this.lookupsQueryManager.rerunLastQuery(auto)}
            localStorageKey={LocalStorageKeys.LOOKUPS_REFRESH_RATE}
          />
          {!lookupsError && (
            <Button
              icon={IconNames.PLUS}
              text="Add lookup"
              onClick={() => this.openLookupEditDialog('', '')}
            />
          )}
          <TableColumnSelector
            columns={tableColumns}
            onChange={column =>
              this.setState(prevState => ({
                hiddenColumns: prevState.hiddenColumns.toggle(column),
              }))
            }
            tableColumnsHidden={hiddenColumns.storedArray}
          />
        </ViewControlBar>
        {this.renderLookupsTable()}
        {this.renderLookupEditDialog()}
        {this.renderDeleteLookupAction()}
      </div>
    );
  }
}
michaelschiff/druid
web-console/src/views/lookups-view/lookups-view.tsx
TypeScript
apache-2.0
11,913
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Entities;

namespace DAL
{
    /// <summary>
    /// Factory that maps the <c>tipo</c> discriminator of a clinical-data record
    /// to the concrete DAO able to handle it.
    /// </summary>
    public class CreatorDatosClinicosDAO
    {
        /// <summary>
        /// Returns the DAO matching the record's <c>tipo</c>, or <c>null</c> when
        /// the type is not recognised.
        /// </summary>
        public DatosClinicosDAO creator(DatosClinicos dc)
        {
            string tipo = dc.tipo;

            // Guard-style dispatch on the type discriminator.
            if (tipo.Equals("Alergias"))
            {
                return new CreatorAlergiasDAO().creator();
            }
            if (tipo.Equals("Antecedentes"))
            {
                return new CreatorAntecedentesDAO().creator();
            }
            if (tipo.Equals("Cirugias"))
            {
                return new CreatorCirugiasDAO().creator();
            }
            if (tipo.Equals("Enfermedades"))
            {
                return new CreatorEnfermedadesDAO().creator();
            }
            if (tipo.Equals("Medicamentos"))
            {
                return new CreatorMedicamentosDAO().creator();
            }

            // Unknown type: no DAO available.
            return null;
        }
    }
}
fabver/EmergenSystServer
EmergenSyst/EmergenSyst/DAL/SOA/Creators/CreatorDatosClinicosDAO.cs
C#
apache-2.0
1,344
package com.app.teacup;

import android.content.Intent;
import android.os.Bundle;
import android.support.design.widget.CollapsingToolbarLayout;
import android.support.v7.widget.Toolbar;
import android.view.View;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.TextView;
import android.widget.Toast;

import com.app.teacup.bean.Music.MusicDetail;
import com.app.teacup.bean.Music.MusicDetailInfo;
import com.app.teacup.bean.Music.MusicInfo;
import com.app.teacup.ui.MoreTextView;
import com.app.teacup.util.OkHttpUtils;
import com.app.teacup.util.urlUtils;
import com.bumptech.glide.Glide;
import com.bumptech.glide.load.engine.DiskCacheStrategy;
import com.squareup.okhttp.Request;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;

/**
 * Detail screen for a music volume: shows the cover image, title, type and
 * description, and a clickable list of tracks parsed from the loaded HTML page.
 */
public class MusicDetailActivity extends BaseActivity {

    private MusicInfo mMusicInfo;        // item handed over from the list view via the intent extra "music"
    private MusicDetailInfo mDetailInfo; // detail data parsed from the fetched HTML page
    private TextView mMusicTitle;
    private TextView mMusicType;
    private MoreTextView mMusicContent;  // expandable description text
    private LinearLayout mPlayList;      // container the per-track rows are appended to
    private TextView mMusicTotal;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.layout_music_detail);
        initToolBar();
        initView();
        startLoadData();
    }

    // Called (via BaseActivity) when the async page load fails.
    @Override
    protected void onLoadDataError() {
        Toast.makeText(MusicDetailActivity.this, getString(R.string.refresh_net_error),
                Toast.LENGTH_SHORT).show();
    }

    // Called (via BaseActivity) once the page has been fetched and parsed.
    @Override
    protected void onLoadDataFinish() {
        initData();
    }

    // Pull-to-refresh callbacks are unused on this screen.
    @Override
    protected void onRefreshError() {
    }

    @Override
    protected void onRefreshFinish() {
    }

    @Override
    protected void onRefreshStart() {
    }

    // Sets up the toolbar with an up-arrow that finishes the activity.
    private void initToolBar() {
        Toolbar toolbar = (Toolbar) findViewById(R.id.music_toolbar);
        setSupportActionBar(toolbar);
        if (getSupportActionBar() != null) {
            getSupportActionBar().setDisplayHomeAsUpEnabled(true);
        }
        if (toolbar != null) {
            toolbar.setNavigationOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    onBackPressed();
                }
            });
        }
    }

    // Binds views and shows the data already available from the intent
    // (title and cover image); the rest arrives asynchronously.
    private void initView() {
        mDetailInfo = new MusicDetailInfo();
        mMusicInfo = (MusicInfo) getIntent().getSerializableExtra("music");
        CollapsingToolbarLayout mCollapsingToolbar = (CollapsingToolbarLayout) findViewById(R.id.collapsing_toolbar);
        if (mCollapsingToolbar != null) {
            // assumes the title contains a space-separated prefix; the second
            // token is used as the collapsing-toolbar title — TODO confirm format
            String[] split = mMusicInfo.getTitle().split(" ");
            mCollapsingToolbar.setTitle(split[1]);
        }
        ImageView ivImage = (ImageView) findViewById(R.id.iv_music_image);
        if (ivImage != null) {
            // Honor the user's "load photos" / "Wi-Fi only" settings.
            if (!MainActivity.mIsLoadPhoto) {
                Glide.with(this).load(mMusicInfo.getImgUrl())
                        .error(R.drawable.photo_loaderror)
                        .dontAnimate()
                        .diskCacheStrategy(DiskCacheStrategy.ALL)
                        .crossFade()
                        .into(ivImage);
            } else {
                if (MainActivity.mIsWIFIState) {
                    Glide.with(this).load(mMusicInfo.getImgUrl())
                            .error(R.drawable.photo_loaderror)
                            .dontAnimate()
                            .diskCacheStrategy(DiskCacheStrategy.ALL)
                            .crossFade()
                            .into(ivImage);
                } else {
                    // No Wi-Fi: show a placeholder instead of downloading.
                    ivImage.setImageResource(R.drawable.main_load_bg);
                }
            }
        }
        mMusicTitle = (TextView) findViewById(R.id.tv_music_title);
        mMusicType = (TextView) findViewById(R.id.tv_music_type);
        mMusicContent = (MoreTextView) findViewById(R.id.tv_music_content);
        mMusicTotal = (TextView) findViewById(R.id.tv_music_total);
        mPlayList = (LinearLayout) findViewById(R.id.ll_play_list);
    }

    // Populates the views from the parsed detail info, then builds the track rows.
    private void initData() {
        mMusicTitle.setText(mMusicInfo.getTitle());
        mMusicType.setText(mDetailInfo.getType());
        mMusicContent.setContent(mDetailInfo.getContent());
        int total = mDetailInfo.getMusicList().size();
        String musicTotal = String.format(Locale.getDefault(), " %d%s", total,
                getString(R.string.music_total_end));
        mMusicTotal.setText(musicTotal);
        initLayout();
    }

    // Inflates one row per track and appends it to the playlist container;
    // tapping a row opens MusicPlayActivity positioned on that track.
    private void initLayout() {
        List<MusicDetail> list = mDetailInfo.getMusicList();
        for (int i = 0; i < list.size(); i++) {
            View view = View.inflate(MusicDetailActivity.this, R.layout.item_music_detail, null);
            TextView index = (TextView) view.findViewById(R.id.tv_play_index);
            ImageView img = (ImageView) view.findViewById(R.id.iv_play_img);
            TextView name = (TextView) view.findViewById(R.id.tv_play_name);
            TextView user = (TextView) view.findViewById(R.id.tv_play_user);
            index.setText(String.format(Locale.getDefault(), "%02d", i + 1));
            // Same photo-loading policy as the cover image above.
            if (!MainActivity.mIsLoadPhoto) {
                Glide.with(this).load(list.get(i).getImgUrl())
                        .error(R.drawable.photo_loaderror)
                        .dontAnimate()
                        .diskCacheStrategy(DiskCacheStrategy.ALL)
                        .crossFade()
                        .into(img);
            } else {
                if (MainActivity.mIsWIFIState) {
                    Glide.with(this).load(list.get(i).getImgUrl())
                            .error(R.drawable.photo_loaderror)
                            .dontAnimate()
                            .diskCacheStrategy(DiskCacheStrategy.ALL)
                            .crossFade()
                            .into(img);
                } else {
                    img.setImageResource(R.drawable.main_load_bg);
                }
            }
            name.setText(list.get(i).getMusicName());
            user.setText(list.get(i).getMusicPlayer());
            final int finalI = i;
            view.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    Intent intent = new Intent(MusicDetailActivity.this, MusicPlayActivity.class);
                    intent.putExtra("position", finalI);
                    intent.putExtra("musicList", (Serializable) mDetailInfo.getMusicList());
                    startActivity(intent);
                }
            });
            mPlayList.addView(view);
        }
    }

    // Fetches the detail page asynchronously; results are routed back through
    // BaseActivity's message mechanism (LOAD_DATA_ERROR / LOAD_DATA_FINISH).
    private void startLoadData() {
        OkHttpUtils.getAsyn(mMusicInfo.getNextUrl(), new OkHttpUtils.ResultCallback<String>() {
            @Override
            public void onError(Request request, Exception e) {
                sendParseDataMessage(LOAD_DATA_ERROR);
            }

            @Override
            public void onResponse(String response) {
                parseMusicData(response);
                sendParseDataMessage(LOAD_DATA_FINISH);
            }
        });
    }

    // Scrapes the volume type, description and track list out of the HTML
    // response, then derives each track's mp3 URL from the volume id.
    private void parseMusicData(String response) {
        Document document = Jsoup.parse(response);
        Element name = document.getElementsByClass("vol-name").get(0);
        Element title = name.getElementsByClass("vol-title").get(0);
        Element dec = document.getElementsByClass("vol-desc").get(0);
        mDetailInfo.setType(title.text());
        mDetailInfo.setContent(dec.text());
        Element playlist = document.getElementById("luooPlayerPlaylist");
        List<MusicDetail> musicList = new ArrayList<>();
        Elements lis = playlist.getElementsByTag("li");
        try {
            for (Element wrapper : lis) {
                Element li = wrapper.getElementsByClass("track-wrapper").get(0);
                MusicDetail music = new MusicDetail();
                Element trackCover = li.getElementsByClass("btn-action-share").get(0);
                music.setImgUrl(trackCover.attr("data-img"));
                Element trackName = li.getElementsByClass("trackname").get(0);
                music.setMusicName(trackName.text());
                Element trackMeta = li.getElementsByClass("artist").get(0);
                music.setMusicPlayer(trackMeta.text());
                musicList.add(music);
            }
        } catch (Exception e) {
            // do not process — best-effort scrape; keep whatever tracks parsed so far
        }
        // assumes the title embeds a 3-digit volume id at characters 4-6 — TODO confirm
        String id = mMusicInfo.getTitle().substring(4, 7);
        for (int i = 1; i <= musicList.size(); i++) {
            String num = String.format(Locale.getDefault(), "%02d", i);
            String musicUrl = urlUtils.MUSIC_PLAYER_URL + id + "/" + num + ".mp3";
            musicList.get(i - 1).setMusicUrl(musicUrl);
        }
        mDetailInfo.setMusicList(musicList);
    }
}
henryblue/TeaCup
app/src/main/java/com/app/teacup/MusicDetailActivity.java
Java
apache-2.0
9,091
package swift.crdt;

import swift.crdt.core.CRDTUpdate;

/**
 * Update operation for a {@link MaxCRDT}: carries a candidate value that the
 * target CRDT folds in via {@code applySet}.
 *
 * @param <V> comparable value type of the target {@link MaxCRDT}
 */
public class MaxUpdate<V extends Comparable<V>> implements CRDTUpdate<MaxCRDT<V>> {
    // Candidate value proposed by this update. Package-private and non-final —
    // presumably left open for serialization frameworks; TODO confirm.
    V value;

    public MaxUpdate(V value) {
        this.value = value;
    }

    /** Applies this update to the given CRDT by proposing {@code value}. */
    @Override
    public void applyTo(MaxCRDT<V> crdt) {
        crdt.applySet(value);
    }

    /** Returns the raw value carried by this update, stripped of any metadata. */
    @Override
    public Object getValueWithoutMetadata() {
        return value;
    }
}
SyncFree/SwiftCloud
src-core/swift/crdt/MaxUpdate.java
Java
apache-2.0
405
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#include "test/pagespeed/kernel/base/mock_message_handler.h"

#include <map>
#include <utility>

#include "pagespeed/kernel/base/abstract_mutex.h"
#include "pagespeed/kernel/base/google_message_handler.h"
#include "pagespeed/kernel/base/message_handler.h"
#include "pagespeed/kernel/base/string.h"
#include "pagespeed/kernel/base/string_util.h"
#include "pagespeed/kernel/base/writer.h"

namespace net_instaweb {

MockMessageHandler::MockMessageHandler(AbstractMutex* mutex)
    : mutex_(mutex) {
}

MockMessageHandler::~MockMessageHandler() {
}

// Records a message without file/line context: forwards it to the internal
// handler, appends a formatted line to buffer_ (unless a skip pattern
// matches), and bumps the per-type counters.
void MockMessageHandler::MessageSImpl(MessageType type,
                                      const GoogleString& message) {
  ScopedMutex hold_mutex(mutex_.get());
  if (ShouldPrintMessage(message)) {
    internal_handler_.MessageSImpl(type, message);
    GoogleString type_str = MessageTypeToString(type);
    // The fixed fake timestamp keeps buffer_ deterministic — presumably so
    // tests can compare against golden output; confirm before changing.
    StrAppend(&buffer_, type_str.substr(0, 1),
              "[Wed Jan 01 00:00:00 2014] ");
    StrAppend(&buffer_, "[", type_str, "] [00000] ");
    StrAppend(&buffer_, message, "\n");
  } else {
    ++skipped_message_counts_[type];
  }
  ++message_counts_[type];
}

// Same as MessageSImpl, but also records the originating file and line.
void MockMessageHandler::FileMessageSImpl(MessageType type,
                                          const char* filename, int line,
                                          const GoogleString& message) {
  ScopedMutex hold_mutex(mutex_.get());
  if (ShouldPrintMessage(message)) {
    internal_handler_.FileMessageSImpl(type, filename, line, message);
    GoogleString type_str = MessageTypeToString(type);
    // See MessageSImpl: fixed fake timestamp for deterministic output.
    StrAppend(&buffer_, type_str.substr(0, 1),
              "[Wed Jan 01 00:00:00 2014] ");
    StrAppend(&buffer_, "[", type_str, "] [00000] ");
    StrAppend(&buffer_, "[", filename, ":", IntegerToString(line), "] ");
    StrAppend(&buffer_, message, "\n");
  } else {
    ++skipped_message_counts_[type];
  }
  ++message_counts_[type];
}

// Number of messages of the given type seen (including skipped ones).
int MockMessageHandler::MessagesOfType(MessageType type) const {
  ScopedMutex hold_mutex(mutex_.get());
  return MessagesOfTypeImpl(message_counts_, type);
}

// Number of messages of the given type that matched a skip pattern.
int MockMessageHandler::SkippedMessagesOfType(MessageType type) const {
  ScopedMutex hold_mutex(mutex_.get());
  return MessagesOfTypeImpl(skipped_message_counts_, type);
}

// Lock-free lookup helper; callers must hold mutex_.
int MockMessageHandler::MessagesOfTypeImpl(const MessageCountMap& counts,
                                           MessageType type) const {
  MessageCountMap::const_iterator i = counts.find(type);
  if (i != counts.end()) {
    return i->second;
  } else {
    return 0;
  }
}

// Total number of messages seen across all types.
int MockMessageHandler::TotalMessages() const {
  ScopedMutex hold_mutex(mutex_.get());
  return TotalMessagesImpl(message_counts_);
}

// Total number of skipped messages across all types.
int MockMessageHandler::TotalSkippedMessages() const {
  ScopedMutex hold_mutex(mutex_.get());
  return TotalMessagesImpl(skipped_message_counts_);
}

// Lock-free summation helper; callers must hold mutex_.
int MockMessageHandler::TotalMessagesImpl(const MessageCountMap& counts) const {
  int total = 0;
  for (MessageCountMap::const_iterator i = counts.begin();
       i != counts.end(); ++i) {
    total += i->second;
  }
  return total;
}

// Messages of any type other than kInfo.
int MockMessageHandler::SeriousMessages() const {
  ScopedMutex hold_mutex(mutex_.get());
  int num = TotalMessagesImpl(message_counts_) -
      MessagesOfTypeImpl(message_counts_, kInfo);
  return num;
}

// Replaces the handler's mutex; only safe while the current one is unlocked.
void MockMessageHandler::set_mutex(AbstractMutex* mutex) {
  mutex_->DCheckUnlocked();
  mutex_.reset(mutex);
}

// Messages matching this wildcard pattern are counted but not printed/buffered.
void MockMessageHandler::AddPatternToSkipPrinting(const char* pattern) {
  patterns_to_skip_.Allow(GoogleString(pattern));
}

bool MockMessageHandler::ShouldPrintMessage(const StringPiece& msg) {
  return !patterns_to_skip_.Match(msg, false);
}

// Writes the accumulated buffer to the writer; returns false when empty.
// NOTE(review): unlike the accessors above, this does not take mutex_ —
// confirm it is only called from a single thread.
bool MockMessageHandler::Dump(Writer* writer) {
  if (buffer_ == "") {
    return false;
  }
  return (writer->Write(buffer_, &internal_handler_));
}

}  // namespace net_instaweb
pagespeed/mod_pagespeed
test/pagespeed/kernel/base/mock_message_handler.cc
C++
apache-2.0
4,545
package com.cloudbees.genapp.metadata.resource;

/*
 * Copyright 2010-2013, CloudBees Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.*;

/**
 * Holds the key/value parameters of a single configuration section
 * (e.g. {@code java}, {@code tomcat7}, {@code glassfish3}, ...).
 */
public class RuntimeProperty {

    /** Name of the section these parameters belong to. */
    private final String sectionName;

    /** The section's parameters; stored as-is (not copied). */
    private final Map<String, String> params;

    /**
     * Builds a RuntimeProperty from a section name and its parameter map.
     *
     * @param section    the parent section of the parameters
     * @param parameters a key/value map of the parameters
     */
    public RuntimeProperty(String section, Map<String, String> parameters) {
        this.sectionName = section;
        this.params = parameters;
    }

    /** @return the name of this section */
    public String getSectionName() {
        return sectionName;
    }

    /** @return the value mapped to {@code parameterName}, or null if absent */
    public String getParameter(String parameterName) {
        return params.get(parameterName);
    }

    /** @return the backing parameter map (same reference passed at construction) */
    public Map<String, String> getParameters() {
        return params;
    }
}
CloudBees-community/genapp-setup
src/main/java/com/cloudbees/genapp/metadata/resource/RuntimeProperty.java
Java
apache-2.0
1,524
/*
 * Copyright (C) 2009 Trustees of the University of Pennsylvania
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS of ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.upenn.cis.ppod.model.provenance;

import static com.google.common.collect.Lists.newArrayList;

import java.util.List;

import edu.upenn.cis.ppod.model.Otu;

/**
 * Provenance record: associates a voucher code with related OTUs,
 * chromatograms, images, and nested provenance records.
 * <p>
 * NOTE(review): this class currently has no accessors or behavior; fields are
 * presumably placeholders for a persistence mapping — confirm before extending.
 *
 * @author Sam Donnelly
 */
public class Provenance {

	/** Voucher identifier; empty string (never null) by default. */
	private String voucherCode = "";

	private List<Otu> otus = newArrayList();

	// Renamed from the misspelled "chromotgrams". The field is private with no
	// accessors, so no external callers can depend on the old name.
	private List<Chromatogram> chromatograms = newArrayList();

	private List<Image> images = newArrayList();

	// Bug fix: this was the only list field left uninitialized (null) while all
	// of its siblings were eagerly initialized; initialize it the same way for
	// consistency and to avoid NPEs once accessors are added.
	private List<Provenance> provenances = newArrayList();
}
snd297/penn-ppod
cdm/src/main/java/edu/upenn/cis/ppod/model/provenance/Provenance.java
Java
apache-2.0
1,115
/* This Java source file was generated by test-to-java.xsl and is a derived work from the source document. The source document contained the following notice: Copyright (c) 2001 World Wide Web Consortium, (Massachusetts Institute of Technology, Institut National de Recherche en Informatique et en Automatique, Keio University). All Rights Reserved. This program is distributed under the W3C's Software Intellectual Property License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See W3C License http://www.w3.org/Consortium/Legal/ for more details. */ package org.w3c.domts.level2.core; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.w3c.dom.Document; import org.w3c.dom.EntityReference; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.w3c.domts.DOMTest.load; /** * The method "isSupported(feature,version)" Tests whether the DOM implementation * implements a specific feature and that feature is supported by this node. * <p> * Call the isSupported method specifying empty strings for feature and version on a * new EntityReference node. Check if the value returned value was false. * * @see <a href="http://www.w3.org/TR/DOM-Level-2-Core/core#Level-2-Core-Node-supports">http://www.w3.org/TR/DOM-Level-2-Core/core#Level-2-Core-Node-supports</a> */ public class nodeissupported04 { @Test @Disabled public void testRun() throws Throwable { Document doc = load("staffNS", false); EntityReference entRef = doc.createEntityReference("ent1"); boolean success = entRef.isSupported("XML CORE", ""); assertFalse(success, "nodeissupported04"); } /** * Gets URI that identifies the test * * @return uri identifier of test */ public String getTargetURI() { return "http://www.w3.org/2001/DOM-Test-Suite/level2/core/nodeissupported04"; } }
apache/xmlbeans
src/test/java/org/w3c/domts/level2/core/nodeissupported04.java
Java
apache-2.0
2,108
// Copyright (c) Microsoft.  All Rights Reserved.  Licensed under the Apache License, Version 2.0.  See License.txt in the project root for license information.

using Microsoft.VisualStudio.IntegrationTest.Utilities.InProcess;
using Xunit;

namespace Microsoft.VisualStudio.IntegrationTest.Utilities.OutOfProcess
{
    /// <summary>
    /// Provides a means of interacting with the Visual Studio debugger by remoting calls into Visual Studio.
    /// Most members are thin pass-throughs to <see cref="Debugger_InProc"/>.
    /// </summary>
    public partial class Debugger_OutOfProc : OutOfProcComponent
    {
        private readonly Debugger_InProc _debuggerInProc;
        private readonly VisualStudioInstance _instance;

        public Debugger_OutOfProc(VisualStudioInstance visualStudioInstance)
            : base(visualStudioInstance)
        {
            _instance = visualStudioInstance;
            _debuggerInProc = CreateInProcComponent<Debugger_InProc>(visualStudioInstance);
        }

        /// <summary>Sets a breakpoint at an explicit line/column position in <paramref name="fileName"/>.</summary>
        public void SetBreakPoint(string fileName, int lineNumber, int columnIndex) =>
            _debuggerInProc.SetBreakPoint(fileName, lineNumber, columnIndex);

        /// <summary>
        /// Sets a breakpoint by locating <paramref name="text"/> in the current document:
        /// selects the text in the editor, reads the resulting caret line/column, then
        /// delegates to the positional overload with <paramref name="charsOffset"/> added to the column.
        /// </summary>
        public void SetBreakPoint(string fileName, string text, int charsOffset = 0)
        {
            _instance.Editor.Activate();
            _instance.Editor.SelectTextInCurrentDocument(text);

            int lineNumber = _instance.Editor.GetLine();
            int columnIndex = _instance.Editor.GetColumn();

            SetBreakPoint(fileName, lineNumber, columnIndex + charsOffset);
        }

        /// <summary>Starts/continues execution; optionally waits for the debugger to enter break mode.</summary>
        public void Go(bool waitForBreakMode) => _debuggerInProc.Go(waitForBreakMode);

        /// <summary>Steps over the current statement; optionally waits for break or end of debugging.</summary>
        public void StepOver(bool waitForBreakOrEnd) => _debuggerInProc.StepOver(waitForBreakOrEnd);

        /// <summary>Stops debugging; optionally waits for the IDE to return to design mode.</summary>
        public void Stop(bool waitForDesignMode) => _debuggerInProc.Stop(waitForDesignMode);

        /// <summary>Moves the instruction pointer to the current caret location ("Set Next Statement").</summary>
        public void SetNextStatement() => _debuggerInProc.SetNextStatement();

        /// <summary>Evaluates <paramref name="statement"/> in the debuggee's context.</summary>
        public void ExecuteStatement(string statement) => _debuggerInProc.ExecuteStatement(statement);

        /// <summary>
        /// Evaluates <paramref name="expressionText"/> in the debugger and asserts that
        /// its reported type and value match the expected strings.
        /// </summary>
        public void CheckExpression(string expressionText, string expectedType, string expectedValue)
        {
            var entry = _debuggerInProc.GetExpression(expressionText);
            Assert.Equal(expectedType, entry.Type);
            Assert.Equal(expectedValue, entry.Value);
        }
    }
}
nguerrera/roslyn
src/VisualStudio/IntegrationTest/TestUtilities/OutOfProcess/Debugger_OutOfProc.cs
C#
apache-2.0
2,263
import { ScenarioResult } from './../shared/services/scenario/scenario.models'; import { BroadcastEvent } from 'app/shared/models/broadcast-event'; import { BroadcastService } from './../shared/services/broadcast.service'; import { Subject } from 'rxjs/Subject'; import { Subscription as RxSubscription } from 'rxjs/Subscription'; import { DisableInfo } from './feature-item'; import { PortalService } from '../shared/services/portal.service'; import { OpenBladeInfo, FrameBladeParams } from '../shared/models/portal'; import { SiteTabIds } from 'app/shared/models/constants'; import { PortalResources } from 'app/shared/models/portal-resources'; export interface DisableInfo { enabled: boolean; disableMessage: string; } export class FeatureItem { public title: string | null; public keywords: string | null; // Space delimited public enabled = true; public info: string | null; public warning: string | null; public isHighlighted: boolean | null; public isEmpty: boolean | null; // Used to reserve blank space when filtering results public highlight: boolean | null; public iconUrl = 'image/activity-log.svg'; public superScriptIconUrl: string | null = null; public nameFocusable: boolean; public imageFocusable: boolean; public onName = false; public onImage = false; constructor(title: string, keywords: string, info: string, iconUrl?: string, superScriptIconUrl?: string) { this.title = title; this.keywords = keywords; this.info = info; this.iconUrl = iconUrl ? 
iconUrl : this.iconUrl; this.superScriptIconUrl = superScriptIconUrl; } click() {} dispose() {} } export class DisableableFeature extends FeatureItem { private _enabledRxSub: RxSubscription; private _overrideSub: RxSubscription; public enabled = false; constructor( title: string, keywords: string, info: string, imageUrl: string, superScriptIconUrl?: string, _disableInfoStream?: Subject<DisableInfo>, overrideDisableInfo?: ScenarioResult, // If the feature is known to be disabled before any async logic, then use this disable immediately overrideDisableStream?: Subject<ScenarioResult> ) { super(title, keywords, info, imageUrl, superScriptIconUrl); if (overrideDisableInfo) { this.setWarningandEnabled(overrideDisableInfo); } else if (overrideDisableStream) { this._overrideSub = overrideDisableStream.subscribe(streamInfo => { this.setWarningandEnabled(streamInfo); }); } else if (_disableInfoStream) { this._enabledRxSub = _disableInfoStream.subscribe(disableInfo => { this.enabled = disableInfo.enabled; if (!this.enabled) { this.warning = disableInfo.disableMessage; } }); } } dispose() { if (this._enabledRxSub) { this._enabledRxSub.unsubscribe(); this._enabledRxSub = null; } if (this._overrideSub) { this._overrideSub.unsubscribe(); this._overrideSub = null; } } setWarningandEnabled(scenarioResult: ScenarioResult) { if (scenarioResult.status === 'disabled') { this.warning = scenarioResult.data; } this.enabled = scenarioResult.status !== 'disabled'; } } abstract class BaseDisableableBladeFeature<T = any> extends DisableableFeature { constructor( title: string, keywords: string, info: string, imageUrl: string, protected _bladeInfo: OpenBladeInfo<T>, protected _portalService: PortalService, disableInfoStream?: Subject<DisableInfo>, overrideDisableInfo?: ScenarioResult ) { super(title, keywords, info, imageUrl, null, disableInfoStream, overrideDisableInfo); } } export class DisableableBladeFeature extends BaseDisableableBladeFeature<any> { click() { 
this._portalService.openBlade(this._bladeInfo, 'site-manage'); } } export class DisableableFrameBladeFeature<T = any> extends BaseDisableableBladeFeature<FrameBladeParams<T>> { click() { this._portalService.openFrameBlade(this._bladeInfo, 'site-manage'); } } abstract class BaseBladeFeature<T = any> extends FeatureItem { constructor( title: string, keywords: string, info: string, imageUrl: string, public bladeInfo: OpenBladeInfo<T>, protected _portalService: PortalService ) { super(title, keywords, info, imageUrl); } } export class BladeFeature extends BaseBladeFeature<any> { click() { this._portalService.openBlade(this.bladeInfo, 'site-manage'); } } export class FrameBladeFeature<T = any> extends BaseBladeFeature<FrameBladeParams<T>> { click() { this._portalService.openFrameBlade(this.bladeInfo, 'site-manage'); } } export class OpenBrowserWindowFeature extends FeatureItem { constructor(title: string, keywords: string, info: string, private _url: string) { super(title, keywords, info); } click() { window.open(this._url); } } export class TabFeature extends FeatureItem { constructor( title: string, keywords: string, info: string, imageUrl: string, public featureId: string, private _broadcastService: BroadcastService ) { super(title, keywords, info, imageUrl, 'image/new-tab.svg'); if (featureId === SiteTabIds.logicApps) { this.warning = PortalResources.tab_logicAppsDeprecation; } } click() { this._broadcastService.broadcastEvent(BroadcastEvent.OpenTab, this.featureId); } } export class DisableableTabFeature extends DisableableFeature { constructor( title: string, keywords: string, info: string, imageUrl: string, public featureId: string, private _broadcastService: BroadcastService, disableInfoStream?: Subject<DisableInfo>, overrideDisableInfo?: ScenarioResult ) { super(title, keywords, info, imageUrl, 'image/new-tab.svg', disableInfoStream, overrideDisableInfo); } click() { this._broadcastService.broadcastEvent(BroadcastEvent.OpenTab, this.featureId); } }
projectkudu/AzureFunctions
client/src/app/feature-group/feature-item.ts
TypeScript
apache-2.0
5,945
/*
 * Copyright Strimzi authors.
 * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
 */
package io.strimzi.systemtest.resources.operator.specific;

import org.junit.jupiter.api.extension.ExtensionContext;

/**
 * Interface for resources that have different deployment strategies, such as Helm or OLM.
 */
public interface SpecificResourceType {
    /**
     * Creates this specific resource.
     *
     * @param extensionContext JUnit 5 extension context of the test invoking the creation
     */
    void create(ExtensionContext extensionContext);

    /**
     * Deletes this specific resource.
     */
    void delete();
}
ppatierno/kaas
systemtest/src/main/java/io/strimzi/systemtest/resources/operator/specific/SpecificResourceType.java
Java
apache-2.0
567
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.storm.api;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.pattern.Patterns;
import akka.util.Timeout;
import org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.AddressResolution;
import org.apache.flink.runtime.jobmaster.JobMaster;
import org.apache.flink.runtime.rpc.akka.AkkaRpcServiceUtils;
import org.apache.storm.Config;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.generated.KillOptions;
import org.apache.storm.generated.NotAliveException;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;
import com.esotericsoftware.kryo.Serializer;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.JobWithJars;
import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.client.program.StandaloneClusterClient;
import org.apache.flink.configuration.AkkaOptions;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.akka.AkkaUtils;
import org.apache.flink.runtime.client.JobStatusMessage;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.messages.JobManagerMessages;
import org.apache.flink.runtime.messages.JobManagerMessages.RunningJobsStatus;
import org.apache.flink.storm.util.StormConfig;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Some;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

/**
 * {@link FlinkClient} mimics a Storm {@link NimbusClient} and {@link Nimbus}{@code .Client} at once, to interact with
 * Flink's JobManager instead of Storm's Nimbus.
 */
public class FlinkClient {

	/** The log used by this client. */
	private static final Logger LOG = LoggerFactory.getLogger(FlinkClient.class);

	/** The client's configuration */
	private final Map<?,?> conf;
	/** The jobmanager's host name */
	private final String jobManagerHost;
	/** The jobmanager's rpc port */
	private final int jobManagerPort;
	/** The user specified timeout in milliseconds */
	private final String timeout;

	// The following methods are derived from "backtype.storm.utils.NimbusClient"

	/**
	 * Instantiates a new {@link FlinkClient} for the given configuration, host name, and port. Values for {@link
	 * Config#NIMBUS_HOST} and {@link Config#NIMBUS_THRIFT_PORT} of the given configuration are ignored.
	 *
	 * @param conf
	 * 		A configuration.
	 * @param host
	 * 		The jobmanager's host name.
	 * @param port
	 * 		The jobmanager's rpc port.
	 */
	@SuppressWarnings("rawtypes")
	public FlinkClient(final Map conf, final String host, final int port) {
		this(conf, host, port, null);
	}

	/**
	 * Instantiates a new {@link FlinkClient} for the given configuration, host name, and port. Values for {@link
	 * Config#NIMBUS_HOST} and {@link Config#NIMBUS_THRIFT_PORT} of the given configuration are ignored.
	 *
	 * @param conf
	 * 		A configuration.
	 * @param host
	 * 		The jobmanager's host name.
	 * @param port
	 * 		The jobmanager's rpc port.
	 * @param timeout
	 * 		Timeout in milliseconds; may be null for no explicit timeout.
	 */
	@SuppressWarnings("rawtypes")
	public FlinkClient(final Map conf, final String host, final int port, final Integer timeout) {
		this.conf = conf;
		this.jobManagerHost = host;
		this.jobManagerPort = port;
		if (timeout != null) {
			// Stored as an Akka duration string, e.g. "5000 ms".
			this.timeout = timeout + " ms";
		} else {
			this.timeout = null;
		}
	}

	/**
	 * Returns a {@link FlinkClient} that uses the configured {@link Config#NIMBUS_HOST} and {@link
	 * Config#NIMBUS_THRIFT_PORT} as JobManager address.
	 *
	 * @param conf
	 * 		Configuration that contains the jobmanager's hostname and port.
	 * @return A configured {@link FlinkClient}.
	 */
	@SuppressWarnings("rawtypes")
	public static FlinkClient getConfiguredClient(final Map conf) {
		final String nimbusHost = (String) conf.get(Config.NIMBUS_HOST);
		final int nimbusPort = Utils.getInt(conf.get(Config.NIMBUS_THRIFT_PORT)).intValue();
		return new FlinkClient(conf, nimbusHost, nimbusPort);
	}

	/**
	 * Return a reference to itself.
	 * <p>
	 * {@link FlinkClient} mimics both, {@link NimbusClient} and {@link Nimbus}{@code .Client}, at once.
	 *
	 * @return A reference to itself.
	 */
	public FlinkClient getClient() {
		return this;
	}

	// The following methods are derived from "backtype.storm.generated.Nimubs.Client"

	/**
	 * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support
	 * uploading a jar file before hand. Jar files are always uploaded directly when a program is submitted.
	 */
	public void submitTopology(final String name, final String uploadedJarLocation, final FlinkTopology topology)
			throws AlreadyAliveException, InvalidTopologyException {
		this.submitTopologyWithOpts(name, uploadedJarLocation, topology);
	}

	/**
	 * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support
	 * uploading a jar file before hand. Jar files are always uploaded directly when a program is submitted.
	 */
	public void submitTopologyWithOpts(final String name, final String uploadedJarLocation, final FlinkTopology topology)
			throws AlreadyAliveException, InvalidTopologyException {

		// A topology with the same name must not already be running.
		if (this.getTopologyJobId(name) != null) {
			throw new AlreadyAliveException();
		}

		// Validate the local jar before building the job graph.
		final URI uploadedJarUri;
		final URL uploadedJarUrl;
		try {
			uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
			uploadedJarUrl = uploadedJarUri.toURL();
			JobWithJars.checkJarFile(uploadedJarUrl);
		} catch (final IOException e) {
			throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
		}

		try {
			FlinkClient.addStormConfigToTopology(topology, conf);
		} catch(ClassNotFoundException e) {
			LOG.error("Could not register class for Kryo serialization.", e);
			throw new InvalidTopologyException("Could not register class for Kryo serialization.");
		}

		final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
		streamGraph.setJobName(name);

		final JobGraph jobGraph = streamGraph.getJobGraph();
		jobGraph.addJar(new Path(uploadedJarUri));

		final Configuration configuration = jobGraph.getJobConfiguration();
		configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
		configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);

		final ClusterClient client;
		try {
			client = new StandaloneClusterClient(configuration);
		} catch (final Exception e) {
			throw new RuntimeException("Could not establish a connection to the job manager", e);
		}

		try {
			ClassLoader classLoader = JobWithJars.buildUserCodeClassLoader(
					Collections.<URL>singletonList(uploadedJarUrl),
					Collections.<URL>emptyList(),
					this.getClass().getClassLoader());
			// Fire-and-forget submission; the client does not wait for job completion.
			client.runDetached(jobGraph, classLoader);
		} catch (final ProgramInvocationException e) {
			throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
		}
	}

	public void killTopology(final String name) throws NotAliveException {
		this.killTopologyWithOpts(name, null);
	}

	public void killTopologyWithOpts(final String name, final KillOptions options) throws NotAliveException {
		final JobID jobId = this.getTopologyJobId(name);
		if (jobId == null) {
			throw new NotAliveException("Storm topology with name " + name + " not found.");
		}

		if (options != null) {
			// Honor Storm's "wait seconds" semantics before issuing the stop.
			try {
				Thread.sleep(1000 * options.get_wait_secs());
			} catch (final InterruptedException e) {
				throw new RuntimeException(e);
			}
		}

		final Configuration configuration = GlobalConfiguration.loadConfiguration();
		configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, this.jobManagerHost);
		configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, this.jobManagerPort);

		final ClusterClient client;
		try {
			client = new StandaloneClusterClient(configuration);
		} catch (final Exception e) {
			throw new RuntimeException("Could not establish a connection to the job manager", e);
		}

		try {
			client.stop(jobId);
		} catch (final Exception e) {
			throw new RuntimeException("Cannot stop job.", e);
		}
	}

	// Flink specific additional methods

	/**
	 * Package internal method to get a Flink {@link JobID} from a Storm topology name.
	 *
	 * @param id
	 * 		The Storm topology name.
	 * @return Flink's internally used {@link JobID}, or null if no running job carries that name.
	 */
	JobID getTopologyJobId(final String id) {
		final Configuration configuration = GlobalConfiguration.loadConfiguration();
		if (this.timeout != null) {
			configuration.setString(AkkaOptions.ASK_TIMEOUT, this.timeout);
		}

		try {
			final ActorRef jobManager = this.getJobManager();

			final FiniteDuration askTimeout = this.getTimeout();
			final Future<Object> response = Patterns.ask(jobManager, JobManagerMessages.getRequestRunningJobsStatus(),
					new Timeout(askTimeout));

			final Object result;
			try {
				result = Await.result(response, askTimeout);
			} catch (final Exception e) {
				throw new RuntimeException("Could not retrieve running jobs from the JobManager", e);
			}

			if (result instanceof RunningJobsStatus) {
				// Linear scan over running jobs; topology names map to Flink job names.
				final List<JobStatusMessage> jobs = ((RunningJobsStatus) result).getStatusMessages();

				for (final JobStatusMessage status : jobs) {
					if (status.getJobName().equals(id)) {
						return status.getJobId();
					}
				}
			} else {
				// NOTE(review): "ReqeustRunningJobs" below is a typo in the original
				// message text; kept byte-identical here.
				throw new RuntimeException("ReqeustRunningJobs requires a response of type "
						+ "RunningJobs. Instead the response is of type " + result.getClass() + ".");
			}
		} catch (final IOException e) {
			throw new RuntimeException("Could not connect to Flink JobManager with address " + this.jobManagerHost
					+ ":" + this.jobManagerPort, e);
		}

		return null;
	}

	// Builds the Akka ask timeout from global config, overridden by the user timeout.
	private FiniteDuration getTimeout() {
		final Configuration configuration = GlobalConfiguration.loadConfiguration();
		if (this.timeout != null) {
			configuration.setString(AkkaOptions.ASK_TIMEOUT, this.timeout);
		}

		return AkkaUtils.getClientTimeout(configuration);
	}

	// Starts a local actor system and resolves the remote JobManager actor.
	private ActorRef getJobManager() throws IOException {
		final Configuration configuration = GlobalConfiguration.loadConfiguration();

		ActorSystem actorSystem;
		try {
			final scala.Tuple2<String, Object> systemEndpoint = new scala.Tuple2<String, Object>("", 0);
			actorSystem = AkkaUtils.createActorSystem(configuration, new Some<scala.Tuple2<String, Object>>(
					systemEndpoint));
		} catch (final Exception e) {
			throw new RuntimeException("Could not start actor system to communicate with JobManager", e);
		}

		final String jobManagerAkkaUrl = AkkaRpcServiceUtils.getRpcUrl(
			jobManagerHost,
			jobManagerPort,
			JobMaster.JOB_MANAGER_NAME,
			AddressResolution.TRY_ADDRESS_RESOLUTION,
			configuration);

		return AkkaUtils.getActorRef(jobManagerAkkaUrl, actorSystem, AkkaUtils.getLookupTimeout(configuration));
	}

	// Copies the Storm config into the topology's ExecutionConfig and registers
	// any Kryo types/serializers listed under TOPOLOGY_KRYO_REGISTER.
	@SuppressWarnings({ "unchecked", "rawtypes" })
	static void addStormConfigToTopology(FlinkTopology topology, Map conf) throws ClassNotFoundException {
		if (conf != null) {
			ExecutionConfig flinkConfig = topology.getExecutionEnvironment().getConfig();

			flinkConfig.setGlobalJobParameters(new StormConfig(conf));

			// add all registered types to ExecutionConfig
			List<?> registeredClasses = (List<?>) conf.get(Config.TOPOLOGY_KRYO_REGISTER);
			if (registeredClasses != null) {
				for (Object klass : registeredClasses) {
					if (klass instanceof String) {
						// Bare class name: register the type with default serialization.
						flinkConfig.registerKryoType(Class.forName((String) klass));
					} else {
						// Map entry: class name -> custom Kryo serializer class name.
						for (Entry<String,String> register : ((Map<String,String>)klass).entrySet()) {
							flinkConfig.registerTypeWithKryoSerializer(Class.forName(register.getKey()),
									(Class<? extends Serializer<?>>)Class.forName(register.getValue()));
						}
					}
				}
			}
		}
	}

}
WangTaoTheTonic/flink
flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java
Java
apache-2.0
13,243
// +build go1.7 package vmutils import ( "encoding/xml" "testing" vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" ) func Test_AddAzureVMExtensionConfiguration(t *testing.T) { role := vm.Role{} AddAzureVMExtensionConfiguration(&role, "nameOfExtension", "nameOfPublisher", "versionOfExtension", "nameOfReference", "state", []byte{1, 2, 3}, []byte{}) data, err := xml.MarshalIndent(role, "", " ") if err != nil { t.Fatal(err) } if expected := `<Role> <ConfigurationSets></ConfigurationSets> <ResourceExtensionReferences> <ResourceExtensionReference> <ReferenceName>nameOfReference</ReferenceName> <Publisher>nameOfPublisher</Publisher> <Name>nameOfExtension</Name> <Version>versionOfExtension</Version> <ResourceExtensionParameterValues> <ResourceExtensionParameterValue> <Key>ignored</Key> <Value>AQID</Value> <Type>Public</Type> </ResourceExtensionParameterValue> </ResourceExtensionParameterValues> <State>state</State> </ResourceExtensionReference> </ResourceExtensionReferences> <DataVirtualHardDisks></DataVirtualHardDisks> </Role>`; string(data) != expected { t.Fatalf("Expected %q, but got %q", expected, string(data)) } }
k4k/kubicorn
vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go
GO
apache-2.0
1,272
// Package goparse contains logic for parsing Go files. Specifically it parses
// source and test files into domain models for generating tests.
package goparser

import (
	"errors"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"io/ioutil"
	"strings"

	"github.com/cweill/gotests/internal/models"
)

// ErrEmptyFile represents an empty file error.
var ErrEmptyFile = errors.New("file is empty")

// Result represents a parsed Go file.
type Result struct {
	// The package name and imports of a Go file.
	Header *models.Header
	// All the functions and methods in a Go file.
	Funcs []*models.Function
}

// Parser can parse Go files.
type Parser struct {
	// The importer to resolve packages from import paths.
	Importer types.Importer
}

// Parse parses a given Go file at srcPath, along any files that share the same
// package, into a domain model for generating tests.
func (p *Parser) Parse(srcPath string, files []models.Path) (*Result, error) {
	b, err := p.readFile(srcPath)
	if err != nil {
		return nil, err
	}
	fset := token.NewFileSet()
	f, err := p.parseFile(fset, srcPath)
	if err != nil {
		return nil, err
	}
	fs, err := p.parseFiles(fset, f, files)
	if err != nil {
		return nil, err
	}
	return &Result{
		Header: &models.Header{
			Comments: parseComment(f, f.Package),
			Package:  f.Name.String(),
			Imports:  parseImports(f.Imports),
			Code:     goCode(b, f),
		},
		Funcs: p.parseFunctions(fset, f, fs),
	}, nil
}

// readFile loads the raw source bytes, rejecting empty files.
func (p *Parser) readFile(srcPath string) ([]byte, error) {
	b, err := ioutil.ReadFile(srcPath)
	if err != nil {
		return nil, fmt.Errorf("ioutil.ReadFile: %v", err)
	}
	if len(b) == 0 {
		return nil, ErrEmptyFile
	}
	return b, nil
}

// parseFile parses the target file into an AST, keeping comments.
func (p *Parser) parseFile(fset *token.FileSet, srcPath string) (*ast.File, error) {
	f, err := parser.ParseFile(fset, srcPath, nil, parser.ParseComments)
	if err != nil {
		return nil, fmt.Errorf("target parser.ParseFile(): %v", err)
	}
	return f, nil
}

// parseFiles parses the sibling files, keeping only those in the same package
// as the target file f.
func (p *Parser) parseFiles(fset *token.FileSet, f *ast.File, files []models.Path) ([]*ast.File, error) {
	pkg := f.Name.String()
	var fs []*ast.File
	for _, file := range files {
		ff, err := parser.ParseFile(fset, string(file), nil, 0)
		if err != nil {
			return nil, fmt.Errorf("other file parser.ParseFile: %v", err)
		}
		if name := ff.Name.String(); name != pkg {
			continue
		}
		fs = append(fs, ff)
	}
	return fs, nil
}

// parseFunctions converts every top-level FuncDecl in f into a domain model,
// using type information gathered from all package files.
func (p *Parser) parseFunctions(fset *token.FileSet, f *ast.File, fs []*ast.File) []*models.Function {
	ul, el := p.parseTypes(fset, fs)
	var funcs []*models.Function
	for _, d := range f.Decls {
		fDecl, ok := d.(*ast.FuncDecl)
		if !ok {
			continue
		}
		funcs = append(funcs, parseFunc(fDecl, ul, el))
	}
	return funcs
}

// parseTypes runs best-effort type checking and returns (1) a map from type
// string to underlying type and (2) a map from struct types to their AST
// expressions (used to recover receiver fields).
func (p *Parser) parseTypes(fset *token.FileSet, fs []*ast.File) (map[string]types.Type, map[*types.Struct]ast.Expr) {
	conf := &types.Config{
		Importer: p.Importer,
		// Adding a NO-OP error function ignores errors and performs best-effort
		// type checking. https://godoc.org/golang.org/x/tools/go/types#Config
		Error: func(error) {},
	}
	ti := &types.Info{
		Types: make(map[ast.Expr]types.TypeAndValue),
	}
	// Note: conf.Check can fail, but since Info is not required data, it's ok.
	conf.Check("", fset, fs, ti)
	ul := make(map[string]types.Type)
	el := make(map[*types.Struct]ast.Expr)
	for e, t := range ti.Types {
		// Collect the underlying types.
		ul[t.Type.String()] = t.Type.Underlying()
		// Collect structs to determine the fields of a receiver.
		if v, ok := t.Type.(*types.Struct); ok {
			el[v] = e
		}
	}
	return ul, el
}

// parseComment collects the comments that appear before the package clause
// (excluding the package doc comment), preserving blank lines between them.
func parseComment(f *ast.File, pkgPos token.Pos) []string {
	var comments []string
	var count int
	for _, comment := range f.Comments {
		if comment.End() < pkgPos && comment != f.Doc {
			for _, c := range comment.List {
				count += len(c.Text) + 1 // +1 for '\n'
				if count < int(c.End()) {
					n := int(c.End()) - count
					comments = append(comments, strings.Repeat("\n", n))
					count++ // for last of '\n'
				}
				comments = append(comments, c.Text)
			}
		}
	}
	return comments
}

// Returns the Go code below the imports block.
func goCode(b []byte, f *ast.File) []byte {
	furthestPos := f.Name.End()
	for _, node := range f.Imports {
		if pos := node.End(); pos > furthestPos {
			furthestPos = pos
		}
	}
	if furthestPos < token.Pos(len(b)) {
		furthestPos++
	}
	return b[furthestPos:]
}

// parseFunc builds a models.Function from a FuncDecl. A trailing "error"
// result is folded into ReturnsError instead of Results.
func parseFunc(fDecl *ast.FuncDecl, ul map[string]types.Type, el map[*types.Struct]ast.Expr) *models.Function {
	f := &models.Function{
		Name:       fDecl.Name.String(),
		IsExported: fDecl.Name.IsExported(),
		Receiver:   parseReceiver(fDecl.Recv, ul, el),
		Parameters: parseFieldList(fDecl.Type.Params, ul),
	}
	fs := parseFieldList(fDecl.Type.Results, ul)
	i := 0
	for _, fi := range fs {
		if fi.Type.String() == "error" {
			f.ReturnsError = true
			continue
		}
		fi.Index = i
		f.Results = append(f.Results, fi)
		i++
	}
	return f
}

// parseImports converts AST import specs into domain Import models.
func parseImports(imps []*ast.ImportSpec) []*models.Import {
	var is []*models.Import
	for _, imp := range imps {
		var n string
		if imp.Name != nil {
			n = imp.Name.String()
		}
		is = append(is, &models.Import{
			Name: n,
			Path: imp.Path.Value,
		})
	}
	return is
}

// parseReceiver builds the receiver model; when the receiver's underlying type
// is a struct known to the type checker, its fields are attached as well.
func parseReceiver(fl *ast.FieldList, ul map[string]types.Type, el map[*types.Struct]ast.Expr) *models.Receiver {
	if fl == nil {
		return nil
	}
	r := &models.Receiver{
		Field: parseFieldList(fl, ul)[0],
	}
	t, ok := ul[r.Type.Value]
	if !ok {
		return r
	}
	s, ok := t.(*types.Struct)
	if !ok {
		return r
	}
	st := el[s].(*ast.StructType)
	r.Fields = append(r.Fields, parseFieldList(st.Fields, ul)...)
	for i, f := range r.Fields {
		f.Name = s.Field(i).Name()
	}
	return r
}

// parseFieldList flattens a FieldList into Field models with running indexes.
func parseFieldList(fl *ast.FieldList, ul map[string]types.Type) []*models.Field {
	if fl == nil {
		return nil
	}
	i := 0
	var fs []*models.Field
	for _, f := range fl.List {
		for _, pf := range parseFields(f, ul) {
			pf.Index = i
			fs = append(fs, pf)
			i++
		}
	}
	return fs
}

// parseFields expands one AST field (possibly naming several identifiers, or
// anonymous) into one Field model per name.
func parseFields(f *ast.Field, ul map[string]types.Type) []*models.Field {
	t := parseExpr(f.Type, ul)
	if len(f.Names) == 0 {
		return []*models.Field{{
			Type: t,
		}}
	}
	var fs []*models.Field
	for _, n := range f.Names {
		fs = append(fs, &models.Field{
			Name: n.Name,
			Type: t,
		})
	}
	return fs
}

// parseExpr models a type expression, unwrapping pointers and variadics and
// recording the underlying type string when known.
func parseExpr(e ast.Expr, ul map[string]types.Type) *models.Expression {
	switch v := e.(type) {
	case *ast.StarExpr:
		val := types.ExprString(v.X)
		return &models.Expression{
			Value:      val,
			IsStar:     true,
			Underlying: underlying(val, ul),
		}
	case *ast.Ellipsis:
		exp := parseExpr(v.Elt, ul)
		return &models.Expression{
			Value:      exp.Value,
			IsStar:     exp.IsStar,
			IsVariadic: true,
			Underlying: underlying(exp.Value, ul),
		}
	default:
		val := types.ExprString(e)
		return &models.Expression{
			Value:      val,
			Underlying: underlying(val, ul),
			IsWriter:   val == "io.Writer",
		}
	}
}

// underlying returns the underlying type string for val, or "" if unknown.
func underlying(val string, ul map[string]types.Type) string {
	if ul[val] != nil {
		return ul[val].String()
	}
	return ""
}
unrealinux/FinancialDataCrawlingPlatform
src/github.com/cweill/gotests/internal/goparser/goparser.go
GO
apache-2.0
7,001
/*
 *  Licensed to the Apache Software Foundation (ASF) under one
 *  or more contributor license agreements.  See the NOTICE file
 *  distributed with this work for additional information
 *  regarding copyright ownership.  The ASF licenses this file
 *  to you under the Apache License, Version 2.0 (the
 *  "License"); you may not use this file except in compliance
 *  with the License.  You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an
 *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *  KIND, either express or implied.  See the License for the
 *  specific language governing permissions and limitations
 *  under the License.
 */
package org.apache.groovy.parser;

import groovy.lang.GroovyClassLoader;
import org.codehaus.groovy.ast.ModuleNode;
import org.codehaus.groovy.control.CompilerConfiguration;
import org.codehaus.groovy.control.ErrorCollector;
import org.codehaus.groovy.control.SourceUnit;

import java.io.File;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * The base parser for creating a module node.
 * The concrete compiler configuration can be specified by the sub-classes of the base parser(e.g. Antlr2Parser, Antlr4Parser)
 *
 * @author  <a href="mailto:realbluesun@hotmail.com">Daniel.Sun</a>
 *          Created on    2016/08/14
 */
public abstract class AbstractParser {
    private static final Logger LOGGER = Logger.getLogger(AbstractParser.class.getName());

    /**
     * Parses a Groovy source file into a module node.
     *
     * @param file the source file to parse; must exist
     * @return the resulting module node, or {@code null} if parsing failed
     * @throws IllegalArgumentException if {@code file} is {@code null} or does not exist
     *         (NOTE(review): when {@code file} is {@code null} the message reads
     *         "null does not exist." — consider a dedicated null message)
     */
    public ModuleNode parse(File file) {
        if (null == file || !file.exists()) {
            throw new IllegalArgumentException(file + " does not exist.");
        }

        CompilerConfiguration configuration = this.getCompilerConfiguration();
        SourceUnit sourceUnit =
                new SourceUnit(
                        file,
                        configuration,
                        new GroovyClassLoader(),
                        new ErrorCollector(configuration));

        return this.parse(sourceUnit);
    }

    /**
     * Parses in-memory Groovy source text into a module node.
     *
     * @param name a name identifying the source (used by the source unit); must not be {@code null}
     * @param text the Groovy source code to parse; must not be {@code null}
     * @return the resulting module node, or {@code null} if parsing failed
     * @throws IllegalArgumentException if {@code name} or {@code text} is {@code null}
     */
    public ModuleNode parse(String name, String text) {
        if (null == name) {
            throw new IllegalArgumentException("name should not be null");
        }

        if (null == text) {
            throw new IllegalArgumentException("text should not be null");
        }

        CompilerConfiguration configuration = this.getCompilerConfiguration();
        SourceUnit sourceUnit =
                new SourceUnit(
                        name,
                        text,
                        configuration,
                        new GroovyClassLoader(),
                        new ErrorCollector(configuration));

        return this.parse(sourceUnit);
    }

    /**
     * Drives the source unit through the parse/conversion phases and returns its AST.
     * Any failure is logged at SEVERE and mapped to a {@code null} return — callers
     * must handle {@code null} rather than expect an exception.
     *
     * @param sourceUnit the source unit to process
     * @return the module node, or {@code null} on any parse/conversion error
     */
    public ModuleNode parse(SourceUnit sourceUnit) {
        try {
            sourceUnit.parse();
            sourceUnit.completePhase();
            sourceUnit.nextPhase();
            sourceUnit.convert();

            return sourceUnit.getAST();
        } catch (Exception e) {
            // Deliberate best-effort behavior: log and return null instead of propagating.
            LOGGER.log(Level.SEVERE, "Failed to parse " + sourceUnit.getName(), e);
            return null;
        }
    }

    /** Supplies the compiler configuration used to build source units (e.g. Antlr2 vs Antlr4). */
    protected abstract CompilerConfiguration getCompilerConfiguration();
}
dpolivaev/groovy
subprojects/parser-antlr4/src/main/java/org/apache/groovy/parser/AbstractParser.java
Java
apache-2.0
3,051
/*
 * The contents of this file are subject to the license and copyright
 * detailed in the LICENSE and NOTICE files at the root of the source
 * tree and available online at
 *
 *     http://duracloud.org/license/
 */
package org.duracloud.glaciertask;

import com.amazonaws.services.s3.AmazonS3;
import org.duracloud.glacierstorage.GlacierStorageProvider;
import org.duracloud.storage.provider.StorageProvider;
import org.duracloud.storage.provider.TaskProviderBase;
import org.slf4j.LoggerFactory;

/**
 * Task provider for Glacier-backed storage: registers the tasks that can be
 * run against a Glacier store (currently only content restore).
 *
 * @author: Bill Branan
 * Date: 2/1/13
 */
public class GlacierTaskProvider extends TaskProviderBase {

    /**
     * @param glacierProvider          possibly-wrapped storage provider used by tasks
     * @param unwrappedGlacierProvider the raw Glacier provider implementation
     * @param s3Client                 AWS S3 client backing the Glacier store
     * @param storeId                  id of the store this provider serves (passed to the base)
     */
    public GlacierTaskProvider(StorageProvider glacierProvider,
                               GlacierStorageProvider unwrappedGlacierProvider,
                               AmazonS3 s3Client,
                               String storeId) {
        super(storeId);
        log = LoggerFactory.getLogger(GlacierTaskProvider.class);

        // Register the single supported task: restoring content from Glacier.
        taskList.add(new RestoreContentTaskRunner(glacierProvider,
                                                  unwrappedGlacierProvider,
                                                  s3Client));
    }

}
duracloud/duracloud
glacierstorageprovider/src/main/java/org/duracloud/glaciertask/GlacierTaskProvider.java
Java
apache-2.0
1,193
/**
 *  Copyright (C) 2015 Topology LP
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#ifndef BONEFISH_RAWSOCKET_SERVER_IMPL_HPP
#define BONEFISH_RAWSOCKET_SERVER_IMPL_HPP

#include <bonefish/rawsocket/rawsocket_listener.hpp>
#include <bonefish/rawsocket/rawsocket_connection.hpp>
#include <bonefish/common/wamp_message_processor.hpp>

#include <boost/asio/ip/address.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <set>
#include <memory>

namespace bonefish {

class wamp_routers;
class wamp_serializers;

// Implementation of the WAMP raw-socket server: owns the attached listeners,
// tracks live connections, and feeds incoming frames into the message
// processor. Lifetime is managed via shared_ptr (enable_shared_from_this).
class rawsocket_server_impl :
        public std::enable_shared_from_this<rawsocket_server_impl>
{
public:
    rawsocket_server_impl(
            const std::shared_ptr<wamp_routers>& routers,
            const std::shared_ptr<wamp_serializers>& serializers);
    ~rawsocket_server_impl();

    // Registers a listener that will produce new connections; call before start().
    void attach_listener(const std::shared_ptr<rawsocket_listener>& listener);
    void start();
    void shutdown();

private:
    // Connection lifecycle callbacks wired into each accepted connection.
    void on_connect(const std::shared_ptr<rawsocket_connection>& connection);
    void on_handshake(const std::shared_ptr<rawsocket_connection>& connection, uint32_t capabilities);
    void on_message(const std::shared_ptr<rawsocket_connection>& connection,
            const char* buffer, size_t length);
    void on_close(const std::shared_ptr<rawsocket_connection>& connection);
    void on_fail(const std::shared_ptr<rawsocket_connection>& connection, const char* reason);

    // Removes a connection from m_connections and releases associated state.
    void teardown_connection(const std::shared_ptr<rawsocket_connection>& connection);

private:
    std::shared_ptr<wamp_routers> m_routers;
    std::shared_ptr<wamp_serializers> m_serializers;
    // owner_less comparator: sets are keyed by pointer ownership, not address value.
    std::set<std::shared_ptr<rawsocket_listener>,
            std::owner_less<std::shared_ptr<rawsocket_listener>>> m_listeners;
    std::set<std::shared_ptr<rawsocket_connection>,
            std::owner_less<std::shared_ptr<rawsocket_connection>>> m_connections;
    wamp_message_processor m_message_processor;
};

} // namespace bonefish

#endif // BONEFISH_RAWSOCKET_SERVER_IMPL_HPP
jpetso/bonefish
src/bonefish/rawsocket/rawsocket_server_impl.hpp
C++
apache-2.0
2,525
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.query.lookup;

import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import io.druid.query.extraction.ExtractionFn;

import javax.annotation.Nullable;

/**
 * An {@link ExtractionFn} that resolves a named lookup through the
 * {@link LookupReferencesManager} at deserialization time and then delegates
 * every operation to the resulting {@link LookupExtractionFn}.
 * Serialization round-trips the lookup's name rather than its contents.
 */
public class RegisteredLookupExtractionFn implements ExtractionFn
{
  // The resolved extraction fn every call is forwarded to.
  private final LookupExtractionFn delegate;
  // The registered lookup name this instance was created from.
  private final String name;

  RegisteredLookupExtractionFn(LookupExtractionFn delegate, String name)
  {
    this.delegate = delegate;
    this.name = name;
  }

  /**
   * Jackson factory: resolves {@code lookup} via the injected manager and wraps
   * it in a {@link LookupExtractionFn} with the given missing-value/injective/
   * optimize settings.
   *
   * @throws IllegalArgumentException if {@code lookup} is null
   * @throws NullPointerException     if no lookup with that name is registered
   */
  @JsonCreator
  public static RegisteredLookupExtractionFn create(
      @JacksonInject LookupReferencesManager manager,
      @JsonProperty("lookup") String lookup,
      @JsonProperty("retainMissingValue") final boolean retainMissingValue,
      @Nullable @JsonProperty("replaceMissingValueWith") final String replaceMissingValueWith,
      @JsonProperty("injective") final boolean injective,
      @JsonProperty("optimize") Boolean optimize
  )
  {
    Preconditions.checkArgument(lookup != null, "`lookup` required");
    final LookupExtractorFactory factory = manager.get(lookup);
    Preconditions.checkNotNull(factory, "lookup [%s] not found", lookup);
    return new RegisteredLookupExtractionFn(
        new LookupExtractionFn(
            factory.get(),
            retainMissingValue,
            replaceMissingValueWith,
            injective,
            optimize
        ),
        lookup
    );
  }

  // Serialized as the lookup's registered name, not its contents.
  @JsonProperty("lookup")
  public String getLookup()
  {
    return name;
  }

  @JsonProperty("retainMissingValue")
  public boolean isRetainMissingValue()
  {
    return delegate.isRetainMissingValue();
  }

  @JsonProperty("replaceMissingValueWith")
  public String getReplaceMissingValueWith()
  {
    return delegate.getReplaceMissingValueWith();
  }

  @JsonProperty("injective")
  public boolean isInjective()
  {
    return delegate.isInjective();
  }

  @JsonProperty("optimize")
  public boolean isOptimize()
  {
    return delegate.isOptimize();
  }

  @Override
  public byte[] getCacheKey()
  {
    return delegate.getCacheKey();
  }

  @Override
  public String apply(Object value)
  {
    return delegate.apply(value);
  }

  @Override
  public String apply(String value)
  {
    return delegate.apply(value);
  }

  @Override
  public String apply(long value)
  {
    return delegate.apply(value);
  }

  @Override
  public boolean preservesOrdering()
  {
    return delegate.preservesOrdering();
  }

  @Override
  public ExtractionType getExtractionType()
  {
    return delegate.getExtractionType();
  }

  @Override
  public String toString()
  {
    return "RegisteredLookupExtractionFn{" +
           "delegate=" + delegate +
           ", name='" + name + '\'' +
           '}';
  }

  // Equality is over both the resolved delegate and the lookup name.
  @Override
  public boolean equals(Object o)
  {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    RegisteredLookupExtractionFn that = (RegisteredLookupExtractionFn) o;

    if (!delegate.equals(that.delegate)) {
      return false;
    }
    return name.equals(that.name);
  }

  @Override
  public int hashCode()
  {
    int result = delegate.hashCode();
    result = 31 * result + name.hashCode();
    return result;
  }
}
rasahner/druid
processing/src/main/java/io/druid/query/lookup/RegisteredLookupExtractionFn.java
Java
apache-2.0
4,142
<?php

namespace Phalcon\Forms\Element {

	/**
	 * Phalcon\Forms\Element\Email
	 *
	 * Component INPUT[type=email] for forms
	 *
	 * Declaration-only stub: the method body is intentionally empty
	 * (the real implementation lives in the Phalcon extension).
	 */
	class Email extends \Phalcon\Forms\Element implements \Phalcon\Forms\ElementInterface {

		/**
		 * Renders the element widget returning html
		 *
		 * @param array attributes
		 * @return string
		 */
		public function render($attributes=null){ }

	}
}
nueko/phalcon-ide-stub
Phalcon/Forms/Element/Email.php
PHP
apache-2.0
388
// Copyright (C) 2013 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.gerrit.server.util; import com.google.gerrit.server.CurrentUser; import com.google.gerrit.server.PluginUser; /** RequestContext active while plugins load or unload. */ public class PluginRequestContext implements RequestContext { private final PluginUser user; public PluginRequestContext(PluginUser user) { this.user = user; } @Override public CurrentUser getUser() { return user; } }
qtproject/qtqa-gerrit
java/com/google/gerrit/server/util/PluginRequestContext.java
Java
apache-2.0
1,038
package eu.drus.jpa.unit.mongodb.ext;

import static com.google.common.base.Preconditions.checkArgument;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Map;

import com.mongodb.MongoClientOptions;
import com.mongodb.MongoCredential;
import com.mongodb.ReadPreference;
import com.mongodb.ServerAddress;
import com.mongodb.WriteConcern;

import eu.drus.jpa.unit.spi.PersistenceUnitDescriptor;

/**
 * MongoDB connection configuration derived from EclipseLink NoSQL
 * persistence-unit properties ({@code eclipselink.nosql.property.mongo.*}).
 * Populates the inherited server addresses, database name, credentials and
 * client options from the descriptor's property map.
 */
public class EclipseLinkConfiguration extends AbstractConfiguration {

    private static final String ECLIPSELINK_NOSQL_PROPERTY_MONGO_READ_PREFERENCE = "eclipselink.nosql.property.mongo.read-preference";
    private static final String ECLIPSELINK_NOSQL_PROPERTY_MONGO_WRITE_CONCERN = "eclipselink.nosql.property.mongo.write-concern";
    private static final String ECLIPSELINK_NOSQL_PROPERTY_PASSWORD = "eclipselink.nosql.property.password";
    private static final String ECLIPSELINK_NOSQL_PROPERTY_USER = "eclipselink.nosql.property.user";
    private static final String ECLIPSELINK_NOSQL_PROPERTY_MONGO_DB = "eclipselink.nosql.property.mongo.db";
    private static final String ECLIPSELINK_NOSQL_PROPERTY_MONGO_HOST = "eclipselink.nosql.property.mongo.host";
    private static final String ECLIPSELINK_NOSQL_PROPERTY_MONGO_PORT = "eclipselink.nosql.property.mongo.port";
    private static final String ECLIPSELINK_TARGET_DATABASE = "eclipselink.target-database";

    /** Factory recognizing persistence units whose EclipseLink target database is a Mongo flavor. */
    public static class ConfigurationFactoryImpl implements ConfigurationFactory {

        @Override
        public boolean isSupported(final PersistenceUnitDescriptor descriptor) {
            final Map<String, Object> properties = descriptor.getProperties();
            final String tgtDataBase = (String) properties.get(ECLIPSELINK_TARGET_DATABASE);

            // Supported when the target-database value mentions mongo (e.g. "...MongoPlatform").
            return tgtDataBase != null && tgtDataBase.contains("mongo");
        }

        @Override
        public Configuration createConfiguration(final PersistenceUnitDescriptor descriptor) {
            return new EclipseLinkConfiguration(descriptor);
        }
    }

    private EclipseLinkConfiguration(final PersistenceUnitDescriptor descriptor) {
        final Map<String, Object> properties = descriptor.getProperties();

        configureServerAddresses(properties);
        configureDatabaseName(properties);
        configureCredentials(properties);
        configureClientOptions(properties);
    }

    /**
     * Builds {@link MongoClientOptions} from the optional write-concern and
     * read-preference properties; absent properties keep the driver defaults.
     */
    private void configureClientOptions(final Map<String, Object> properties) {
        final MongoClientOptions.Builder builder = MongoClientOptions.builder();

        final String writeConcern = (String) properties.get(ECLIPSELINK_NOSQL_PROPERTY_MONGO_WRITE_CONCERN);
        final String readPreference = (String) properties.get(ECLIPSELINK_NOSQL_PROPERTY_MONGO_READ_PREFERENCE);

        if (writeConcern != null) {
            builder.writeConcern(WriteConcern.valueOf(writeConcern));
        }
        if (readPreference != null) {
            builder.readPreference(ReadPreference.valueOf(readPreference));
        }

        mongoClientOptions = builder.build();
    }

    /**
     * Configures credentials when a user is set. A user without a password is
     * a configuration error; without a user the credential list is empty.
     * NOTE(review): the authentication database is hard-coded to "admin".
     */
    private void configureCredentials(final Map<String, Object> properties) {
        final String userName = (String) properties.get(ECLIPSELINK_NOSQL_PROPERTY_USER);
        final String password = (String) properties.get(ECLIPSELINK_NOSQL_PROPERTY_PASSWORD);

        if (userName != null) {
            checkArgument(password != null, ECLIPSELINK_NOSQL_PROPERTY_PASSWORD + " was not configured, but required");
            mongoCredentialList = Collections
                    .singletonList(MongoCredential.createPlainCredential(userName, "admin", password.toCharArray()));
        } else {
            mongoCredentialList = Collections.emptyList();
        }
    }

    /** Reads the mandatory database-name property. */
    private void configureDatabaseName(final Map<String, Object> properties) {
        databaseName = (String) properties.get(ECLIPSELINK_NOSQL_PROPERTY_MONGO_DB);
        checkArgument(databaseName != null, ECLIPSELINK_NOSQL_PROPERTY_MONGO_DB + " was not configured, but required");
    }

    /**
     * Builds the server address list from parallel comma-separated host and
     * port properties. Hosts without a matching port entry use the driver's
     * default port; when no hosts are configured a default address is used.
     */
    private void configureServerAddresses(final Map<String, Object> properties) {
        final String ports = (String) properties.get(ECLIPSELINK_NOSQL_PROPERTY_MONGO_PORT);
        final String hosts = (String) properties.get(ECLIPSELINK_NOSQL_PROPERTY_MONGO_HOST);

        final String[] hostList = hosts != null ? hosts.split(",") : new String[] {};
        final String[] portList = ports != null ? ports.split(",") : new String[] {};

        serverAddresses = new ArrayList<>();
        for (int i = 0; i < hostList.length; i++) {
            // Fall back to the default port when the port list is shorter than the host list.
            final int port = i < portList.length
                    ? Integer.parseInt(portList[i].trim())
                    : ServerAddress.defaultPort();
            serverAddresses.add(new ServerAddress(hostList[i].trim(), port));
        }

        if (serverAddresses.isEmpty()) {
            serverAddresses.add(new ServerAddress());
        }
    }
}
dadrus/persistence-test
mongodb/src/main/java/eu/drus/jpa/unit/mongodb/ext/EclipseLinkConfiguration.java
Java
apache-2.0
4,957
// Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/api/config_change.proto package com.google.api; /** * <pre> * Generated advice about this change, used for providing more * information about how a change will affect the existing service. * </pre> * * Protobuf type {@code google.api.Advice} */ public final class Advice extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.api.Advice) AdviceOrBuilder { // Use Advice.newBuilder() to construct. private Advice(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private Advice() { description_ = ""; } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private Advice( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!input.skipField(tag)) { done = true; } break; } case 18: { java.lang.String s = input.readStringRequireUtf8(); description_ = s; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.api.ConfigChangeProto.internal_static_google_api_Advice_descriptor; } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.api.ConfigChangeProto.internal_static_google_api_Advice_fieldAccessorTable .ensureFieldAccessorsInitialized( 
com.google.api.Advice.class, com.google.api.Advice.Builder.class); } public static final int DESCRIPTION_FIELD_NUMBER = 2; private volatile java.lang.Object description_; /** * <pre> * Useful description for why this advice was applied and what actions should * be taken to mitigate any implied risks. * </pre> * * <code>optional string description = 2;</code> */ public java.lang.String getDescription() { java.lang.Object ref = description_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); description_ = s; return s; } } /** * <pre> * Useful description for why this advice was applied and what actions should * be taken to mitigate any implied risks. * </pre> * * <code>optional string description = 2;</code> */ public com.google.protobuf.ByteString getDescriptionBytes() { java.lang.Object ref = description_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); description_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!getDescriptionBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, description_); } } public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (!getDescriptionBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, description_); } memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean 
equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.api.Advice)) { return super.equals(obj); } com.google.api.Advice other = (com.google.api.Advice) obj; boolean result = true; result = result && getDescription() .equals(other.getDescription()); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; hash = (53 * hash) + getDescription().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.api.Advice parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.api.Advice parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.api.Advice parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.api.Advice parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.api.Advice parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static com.google.api.Advice parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static com.google.api.Advice 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static com.google.api.Advice parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static com.google.api.Advice parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static com.google.api.Advice parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.api.Advice prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * <pre> * Generated advice about this change, used for providing more * information about how a change will affect the existing service. 
* </pre> * * Protobuf type {@code google.api.Advice} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.api.Advice) com.google.api.AdviceOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.api.ConfigChangeProto.internal_static_google_api_Advice_descriptor; } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.api.ConfigChangeProto.internal_static_google_api_Advice_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.api.Advice.class, com.google.api.Advice.Builder.class); } // Construct using com.google.api.Advice.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } public Builder clear() { super.clear(); description_ = ""; return this; } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.api.ConfigChangeProto.internal_static_google_api_Advice_descriptor; } public com.google.api.Advice getDefaultInstanceForType() { return com.google.api.Advice.getDefaultInstance(); } public com.google.api.Advice build() { com.google.api.Advice result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public com.google.api.Advice buildPartial() { com.google.api.Advice result = new com.google.api.Advice(this); result.description_ = description_; onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, 
value); } public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.api.Advice) { return mergeFrom((com.google.api.Advice)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.api.Advice other) { if (other == com.google.api.Advice.getDefaultInstance()) return this; if (!other.getDescription().isEmpty()) { description_ = other.description_; onChanged(); } onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.api.Advice parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.api.Advice) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private java.lang.Object description_ = ""; /** * <pre> * Useful description for why this advice was applied and what actions should * be taken to mitigate any implied risks. 
* </pre> * * <code>optional string description = 2;</code> */ public java.lang.String getDescription() { java.lang.Object ref = description_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); description_ = s; return s; } else { return (java.lang.String) ref; } } /** * <pre> * Useful description for why this advice was applied and what actions should * be taken to mitigate any implied risks. * </pre> * * <code>optional string description = 2;</code> */ public com.google.protobuf.ByteString getDescriptionBytes() { java.lang.Object ref = description_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); description_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <pre> * Useful description for why this advice was applied and what actions should * be taken to mitigate any implied risks. * </pre> * * <code>optional string description = 2;</code> */ public Builder setDescription( java.lang.String value) { if (value == null) { throw new NullPointerException(); } description_ = value; onChanged(); return this; } /** * <pre> * Useful description for why this advice was applied and what actions should * be taken to mitigate any implied risks. * </pre> * * <code>optional string description = 2;</code> */ public Builder clearDescription() { description_ = getDefaultInstance().getDescription(); onChanged(); return this; } /** * <pre> * Useful description for why this advice was applied and what actions should * be taken to mitigate any implied risks. 
* </pre> * * <code>optional string description = 2;</code> */ public Builder setDescriptionBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); description_ = value; onChanged(); return this; } public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } // @@protoc_insertion_point(builder_scope:google.api.Advice) } // @@protoc_insertion_point(class_scope:google.api.Advice) private static final com.google.api.Advice DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.api.Advice(); } public static com.google.api.Advice getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<Advice> PARSER = new com.google.protobuf.AbstractParser<Advice>() { public Advice parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new Advice(input, extensionRegistry); } }; public static com.google.protobuf.Parser<Advice> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<Advice> getParserForType() { return PARSER; } public com.google.api.Advice getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
speedycontrol/googleapis
output/com/google/api/Advice.java
Java
apache-2.0
17,048
package aws import ( "github.com/solo-io/unik/pkg/providers/common" "github.com/solo-io/unik/pkg/types" ) func (p *AwsProvider) GetImage(nameOrIdPrefix string) (*types.Image, error) { return common.GetImage(p, nameOrIdPrefix) }
emc-advanced-dev/unik
pkg/providers/aws/get_image.go
GO
apache-2.0
233
// Package run implements the enaml CLI subcommands.
package run

import (
	"fmt"
	"io"

	"github.com/enaml-ops/enaml"
	"github.com/enaml-ops/enaml/diff"
	"github.com/enaml-ops/enaml/pull"
)

// DiffCmd runs the diff CLI command
type DiffCmd struct {
	releaseRepo pull.Release // repository used to fetch release artifacts
	release1    string       // first release to compare
	release2    string       // second release to compare
}

// NewDiffCmd creates a new DiffCmd instance.
func NewDiffCmd(releaseRepo pull.Release, release1, release2 string) *DiffCmd {
	return &DiffCmd{
		releaseRepo: releaseRepo,
		release1:    release1,
		release2:    release2,
	}
}

// All writes out all the differences between the specified releases
func (s *DiffCmd) All(w io.Writer) error {
	differ, err := diff.New(s.releaseRepo, s.release1, s.release2)
	if err != nil {
		return err
	}
	d, err := differ.Diff()
	if err != nil {
		return err
	}
	s.printDiffResult(w, d)
	return nil
}

// Job writes out the job differences between the specified releases
func (s *DiffCmd) Job(job string, w io.Writer) error {
	differ, err := diff.New(s.releaseRepo, s.release1, s.release2)
	if err != nil {
		return err
	}
	d, err := differ.DiffJob(job)
	if err != nil {
		return err
	}
	s.printDiffResult(w, d)
	return nil
}

// printDiffResult renders every job delta in the result, one after another,
// followed by a trailing blank line.
func (s *DiffCmd) printDiffResult(w io.Writer, d *diff.Result) {
	for _, j := range d.DeltaJob {
		s.printDeltaJob(w, &j)
	}
	fmt.Fprintln(w)
}

// printDeltaJob renders one job delta: a banner identifying the release and
// job, then each added property prefixed "+" and each removed property
// prefixed "-".
func (s *DiffCmd) printDeltaJob(w io.Writer, j *diff.DeltaJob) {
	fmt.Fprintln(w, "------------------------------------------------------")
	fmt.Fprintln(w, fmt.Sprintf("Release: %s", j.ReleaseName))
	fmt.Fprintln(w, fmt.Sprintf("Job: %s", j.JobName))
	fmt.Fprintln(w, "------------------------------------------------------")
	for pname, prop := range j.AddedProperties {
		fmt.Fprintln(w, fmt.Sprintf("+ %s", pname))
		s.printBoshJobProperty(w, "+", prop)
		fmt.Fprintln(w)
	}
	for pname, prop := range j.RemovedProperties {
		fmt.Fprintln(w, fmt.Sprintf("- %s", pname))
		s.printBoshJobProperty(w, "-", prop)
		fmt.Fprintln(w)
	}
	fmt.Fprintln(w)
}

// printBoshJobProperty renders a property's description and default value,
// each prefixed with the given added/removed marker; empty description and
// nil default are skipped.
func (s *DiffCmd) printBoshJobProperty(w io.Writer, addedRemoved string, p enaml.JobManifestProperty) {
	if len(p.Description) > 0 {
		fmt.Fprintln(w, fmt.Sprintf("%s Description: %s", addedRemoved, p.Description))
	}
	if p.Default != nil {
		fmt.Fprintln(w, fmt.Sprintf("%s Default: %v", addedRemoved, p.Default))
	}
}
xchapter7x/enaml
run/diffcmd.go
GO
apache-2.0
2,253
/****************************************************************************
 * Copyright (C) 2012 ecsec GmbH.
 * All rights reserved.
 * Contact: ecsec GmbH (info@ecsec.de)
 *
 * This file is part of the Open eCard App.
 *
 * GNU General Public License Usage
 * This file may be used under the terms of the GNU General Public
 * License version 3.0 as published by the Free Software Foundation
 * and appearing in the file LICENSE.GPL included in the packaging of
 * this file. Please review the following information to ensure the
 * GNU General Public License version 3.0 requirements will be met:
 * http://www.gnu.org/copyleft/gpl.html.
 *
 * Other Usage
 * Alternatively, this file may be used in accordance with the terms
 * and conditions contained in a signed written agreement between
 * you and ecsec GmbH.
 *
 ***************************************************************************/

package org.openecard.gui.swing;

import java.awt.AWTEvent;
import java.awt.BorderLayout;
import java.awt.Component;
import java.awt.Container;
import java.awt.EventQueue;
import java.awt.Toolkit;
import java.awt.event.ActionEvent;
import java.awt.event.KeyEvent;
import java.util.List;
import javax.swing.BoxLayout;
import javax.swing.GroupLayout;
import javax.swing.JComponent;
import javax.swing.JPanel;
import org.openecard.gui.FileDialog;
import org.openecard.gui.MessageDialog;
import org.openecard.gui.UserConsent;
import org.openecard.gui.UserConsentNavigator;
import org.openecard.gui.definition.Step;
import org.openecard.gui.definition.UserConsentDescription;
import org.openecard.gui.swing.common.GUIConstants;

/**
 * Swing implementation of the UserConsent interface.
 *
 * @author Tobias Wich <tobias.wich@ecsec.de>
 * @author Florian Feldmann <florian.feldmann@rub.de>
 * @author Moritz Horsch <horsch@cdc.informatik.tu-darmstadt.de>
 */
public class SwingUserConsent implements UserConsent {

    // Supplies the root pane (content pane) that all components are drawn into.
    private final DialogWrapper dialogWrapper;

    /**
     * Instantiate SwingUserConsent.
     * The implementation encapsulates a DialogWrapper which is needed to supply a root pane for all draw operations.
     *
     * @param dialogWrapper wrapper providing the dialog title and content pane
     */
    public SwingUserConsent(DialogWrapper dialogWrapper) {
	this.dialogWrapper = dialogWrapper;
    }

    /**
     * Builds the complete consent UI (side bar with logo and step bar, step
     * panel, navigation bar) inside the wrapper's content pane and returns a
     * navigator driving it.
     *
     * @param parameters description containing the dialog title, type and steps
     * @return navigator over the described steps
     */
    @Override
    public UserConsentNavigator obtainNavigator(UserConsentDescription parameters) {
	dialogWrapper.setTitle(parameters.getTitle());

	// Any previously-built consent UI is discarded before rebuilding.
	Container rootPanel = dialogWrapper.getContentPane();
	rootPanel.removeAll();

	String dialogType = parameters.getDialogType();
	List<Step> steps = parameters.getSteps();

	// Set up panels
	JPanel stepPanel = new JPanel(new BorderLayout());
	JPanel sideBar = new JPanel();

	StepBar stepBar = new StepBar(steps);
	final NavigationBar navigationBar = new NavigationBar(steps.size());
	Logo l = new Logo();
	initializeSidePanel(sideBar, l, stepBar);

	final SwingNavigator navigator = new SwingNavigator(dialogWrapper, dialogType, steps, stepPanel,
		navigationBar, stepBar);
	navigationBar.registerEvents(navigator);

	// Add global key listener
	// NOTE(review): this pushes a custom EventQueue onto the system-wide
	// AWT queue so that pressing ENTER anywhere (when the navigation bar
	// itself does not have focus) advances to the next step. The push is
	// global and is never popped here — confirm that is intentional.
	EventQueue eventQueue = new EventQueue() {
	    ActionEvent e = new ActionEvent(navigationBar, ActionEvent.ACTION_PERFORMED, GUIConstants.BUTTON_NEXT);

	    @Override
	    protected void dispatchEvent(AWTEvent event) {
		if (event instanceof KeyEvent) {
		    KeyEvent keyEvent = (KeyEvent) event;
		    if (KeyEvent.KEY_RELEASED == keyEvent.getID() && KeyEvent.VK_ENTER == keyEvent.getKeyCode()) {
			// If the enter is pressed when perform a next step event
			if (!navigationBar.hasFocus()) {
			    navigator.actionPerformed(e);
			}
		    }
		}
		super.dispatchEvent(event);
	    }
	};
	Toolkit.getDefaultToolkit().getSystemEventQueue().push(eventQueue);

	// Config layout: fixed-width (200px) side bar on the left; step panel
	// stacked above the navigation bar on the right.
	GroupLayout layout = new GroupLayout(rootPanel);
	rootPanel.setLayout(layout);

	layout.setAutoCreateGaps(false);
	layout.setAutoCreateContainerGaps(true);

	layout.setHorizontalGroup(
		layout.createSequentialGroup()
		.addComponent(sideBar, 200, 200, 200)
		.addGroup(layout.createParallelGroup()
		.addComponent(stepPanel)
		.addComponent(navigationBar)));
	layout.setVerticalGroup(
		layout.createParallelGroup(GroupLayout.Alignment.CENTER)
		.addComponent(sideBar)
		.addGroup(layout.createSequentialGroup()
		.addComponent(stepPanel)
		.addComponent(navigationBar)));

	rootPanel.validate();
	rootPanel.repaint();

	return navigator;
    }

    /** @return a new Swing-based file dialog */
    @Override
    public FileDialog obtainFileDialog() {
	return new SwingFileDialog();
    }

    /** @return a new Swing-based message dialog */
    @Override
    public MessageDialog obtainMessageDialog() {
	return new SwingMessageDialog();
    }

    /**
     * Lays out the given components vertically (top to bottom), left-aligned,
     * inside the side panel.
     *
     * @param panel panel to initialize
     * @param components components to add, in order
     */
    private void initializeSidePanel(JPanel panel, JComponent... components) {
	panel.setLayout(new BoxLayout(panel, BoxLayout.PAGE_AXIS));
	for (JComponent c : components) {
	    c.setAlignmentX(Component.LEFT_ALIGNMENT);
	    panel.add(c);
	}
    }

}
adelapie/open-ecard-IRMA
gui/swing/src/main/java/org/openecard/gui/swing/SwingUserConsent.java
Java
apache-2.0
4,844
<?php

/**
 * Copyright (c) 2017 Cornell University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

namespace App\OrderformBundle\Controller;

use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpFoundation\Request;
use App\UserdirectoryBundle\Controller\OrderAbstractController;
//use Sensio\Bundle\FrameworkExtraBundle\Configuration\Method;
//use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Template;
use Symfony\Component\Routing\Annotation\Route;

class DataReviewController extends OrderAbstractController
{
    /**
     * Data review page for a scan order: loads the order's message together
     * with its associated Educational and Research entities.
     *
     * @Route("/scan-order/{id}/data-review", name="scan-order-data-review-full", methods={"GET"}, requirements={"id" = "\d+"})
     * @Template("AppOrderformBundle/DataReview/index-order.html.twig")
     */
    public function getDataReviewAction($id)
    {
        $em = $this->getDoctrine()->getManager();

        // NOTE(review): the message is looked up by Oid while the joins below
        // filter on message.id — confirm these identifiers are interchangeable.
        $message = $em->getRepository('AppOrderformBundle:Message')->findOneByOid($id);

        // The two entity lookups were identical apart from the entity class;
        // factored into a single private helper.
        $educational = $this->findByMessageId($em, 'AppOrderformBundle:Educational', $id);
        $research = $this->findByMessageId($em, 'AppOrderformBundle:Research', $id);

        return array(
            'educationals' => $educational,
            'researches' => $research,
            'entity' => $message
        );
    }

    /**
     * Fetch all entities of the given class whose associated message has the
     * given id.
     *
     * @param object $em Doctrine entity manager
     * @param string $entityClass entity name, e.g. 'AppOrderformBundle:Educational'
     * @param int|string $id message id to filter on
     * @return array matching entities
     */
    private function findByMessageId($em, $entityClass, $id)
    {
        $query = $em->createQueryBuilder()
            ->from($entityClass, 'e')
            ->select("e")
            ->leftJoin("e.message", "message")
            ->where("message.id=:id")
            ->setParameter("id", $id);

        return $query->getQuery()->getResult();
    }
}
victorbrodsky/order-lab
orderflex/src/App/OrderformBundle/Controller/DataReviewController.php
PHP
apache-2.0
2,305
/*******************************************************************************
 *
 * Pentaho Big Data
 *
 * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com
 *
 *******************************************************************************
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************************/

package org.pentaho.big.data.kettle.plugins.mapreduce.step.exit;

import org.pentaho.big.data.kettle.plugins.mapreduce.DialogClassUtil;
import org.pentaho.di.core.CheckResult;
import org.pentaho.di.core.CheckResultInterface;
import org.pentaho.di.core.annotations.Step;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.exception.KettleStepException;
import org.pentaho.di.core.exception.KettleXMLException;
import org.pentaho.di.core.injection.Injection;
import org.pentaho.di.core.injection.InjectionSupported;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMetaInterface;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.xml.XMLHandler;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.repository.ObjectId;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.trans.Trans;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.BaseStepMeta;
import org.pentaho.di.trans.step.StepDataInterface;
import org.pentaho.di.trans.step.StepInterface;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.step.StepMetaInterface;
import org.pentaho.metastore.api.IMetaStore;
import org.w3c.dom.Node;

import java.util.Arrays;
import java.util.List;

/**
 * Step metadata for the MapReduce Output ("Hadoop Exit") step. Holds the names
 * of the two incoming fields to emit as the MapReduce output key and value,
 * handles their XML/repository (de)serialization, and rewrites the outgoing
 * row meta to exactly two fields named "outKey" and "outValue".
 */
@Step( id = "HadoopExitPlugin", image = "MRO.svg", name = "HadoopExitPlugin.Name",
    description = "HadoopExitPlugin.Description",
    documentationUrl = "http://wiki.pentaho.com/display/EAI/MapReduce+Output",
    categoryDescription = "i18n:org.pentaho.di.trans.step:BaseStep.Category.BigData",
    i18nPackageName = "org.pentaho.di.trans.steps.hadoopexit" )
@InjectionSupported( localizationPrefix = "HadoopExitPlugin.Injection." )
public class HadoopExitMeta extends BaseStepMeta implements StepMetaInterface {
  // i18n message keys used in getFields()/check() error reporting.
  public static final String ERROR_INVALID_KEY_FIELD = "Error.InvalidKeyField";
  public static final String ERROR_INVALID_VALUE_FIELD = "Error.InvalidValueField";
  // Names of the two fields this step produces on the outgoing row.
  public static final String OUT_KEY = "outKey";
  public static final String OUT_VALUE = "outValue";
  public static final String HADOOP_EXIT_META_CHECK_RESULT_NO_DATA_STREAM = "HadoopExitMeta.CheckResult.NoDataStream";
  public static final String HADOOP_EXIT_META_CHECK_RESULT_NO_SPECIFIED_FIELDS =
      "HadoopExitMeta.CheckResult.NoSpecifiedFields";
  public static final String HADOOP_EXIT_META_CHECK_RESULT_STEP_RECEVING_DATA =
      "HadoopExitMeta.CheckResult.StepRecevingData";
  public static final String HADOOP_EXIT_META_CHECK_RESULT_NOT_RECEVING_SPECIFIED_FIELDS =
      "HadoopExitMeta.CheckResult.NotRecevingSpecifiedFields";

  public static Class<?> PKG = HadoopExit.class; // for i18n purposes, needed by Translator2!! $NON-NLS-1$

  public static final String DIALOG_NAME = DialogClassUtil.getDialogClassName( PKG );

  // Tag/attribute names used for XML and repository persistence.
  public static String OUT_KEY_FIELDNAME = "outkeyfieldname";
  public static String OUT_VALUE_FIELDNAME = "outvaluefieldname";

  // Name of the incoming field to emit as the output key.
  @Injection( name = "KEY_FIELD" )
  private String outKeyFieldname;

  // Name of the incoming field to emit as the output value.
  @Injection( name = "VALUE_FIELD" )
  private String outValueFieldname;

  public HadoopExitMeta() throws Throwable {
    super();
  }

  /** Restores the key/value field names from the step's XML node. */
  @Override
  public void loadXML( Node stepnode, List<DatabaseMeta> databases, IMetaStore metaStore ) throws KettleXMLException {
    setOutKeyFieldname( XMLHandler.getTagValue( stepnode, HadoopExitMeta.OUT_KEY_FIELDNAME ) ); //$NON-NLS-1$
    setOutValueFieldname( XMLHandler.getTagValue( stepnode, HadoopExitMeta.OUT_VALUE_FIELDNAME ) ); //$NON-NLS-1$
  }

  /** Serializes the key/value field names as XML tags. */
  @Override
  public String getXML() {
    StringBuilder retval = new StringBuilder();
    retval.append( " " ).append( XMLHandler.addTagValue( HadoopExitMeta.OUT_KEY_FIELDNAME, getOutKeyFieldname() ) );
    retval.append( " " )
      .append( XMLHandler.addTagValue( HadoopExitMeta.OUT_VALUE_FIELDNAME, getOutValueFieldname() ) );
    return retval.toString();
  }

  public Object clone() {
    return super.clone();
  }

  /** Resets both field names; there are no meaningful defaults. */
  @Override
  public void setDefault() {
    setOutKeyFieldname( null );
    setOutValueFieldname( null );
  }

  /** Restores the key/value field names from the repository. */
  @Override
  public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List<DatabaseMeta> databases )
    throws KettleException {
    setOutKeyFieldname( rep.getStepAttributeString( id_step, HadoopExitMeta.OUT_KEY_FIELDNAME ) ); //$NON-NLS-1$
    setOutValueFieldname( rep.getStepAttributeString( id_step, HadoopExitMeta.OUT_VALUE_FIELDNAME ) ); //$NON-NLS-1$
  }

  /** Saves the key/value field names to the repository. */
  @Override
  public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step )
    throws KettleException {
    rep.saveStepAttribute( id_transformation, id_step, HadoopExitMeta.OUT_KEY_FIELDNAME,
        getOutKeyFieldname() ); //$NON-NLS-1$
    rep.saveStepAttribute( id_transformation, id_step, HadoopExitMeta.OUT_VALUE_FIELDNAME,
        getOutValueFieldname() ); //$NON-NLS-1$
  }

  /**
   * Replaces the incoming row meta with exactly two fields, "outKey" and
   * "outValue", cloned from the configured source fields so their data types
   * carry over.
   *
   * @throws KettleStepException if either configured field is missing from the
   *                             incoming row meta
   */
  @Override
  public void getFields( RowMetaInterface rowMeta, String origin, RowMetaInterface[] info, StepMeta nextStep,
                         VariableSpace space ) throws KettleStepException {
    ValueMetaInterface key = rowMeta.searchValueMeta( getOutKeyFieldname() );
    ValueMetaInterface value = rowMeta.searchValueMeta( getOutValueFieldname() );

    if ( key == null ) {
      throw new KettleStepException( BaseMessages.getString( PKG, ERROR_INVALID_KEY_FIELD, getOutKeyFieldname() ) );
    }
    if ( value == null ) {
      throw new KettleStepException( BaseMessages.getString( PKG, ERROR_INVALID_VALUE_FIELD,
          getOutValueFieldname() ) );
    }

    // The output consists of 2 fields: outKey and outValue
    // The data types rely on the input data type so we look those up
    //
    ValueMetaInterface keyMeta = key.clone();
    ValueMetaInterface valueMeta = value.clone();

    keyMeta.setName( OUT_KEY );
    valueMeta.setName( OUT_VALUE );

    rowMeta.clear();
    rowMeta.addValueMeta( keyMeta );
    rowMeta.addValueMeta( valueMeta );
  }

  /**
   * Verifies the step receives input, both field names are configured, and
   * both are present in the incoming row meta; appends a CheckResult per
   * finding.
   */
  @Override
  public void check( List<CheckResultInterface> remarks, TransMeta transMeta, StepMeta stepinfo,
                     RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info ) {
    CheckResult cr;

    // Make sure we have an input stream that contains the desired field names
    if ( prev == null || prev.size() == 0 ) {
      cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR,
          BaseMessages.getString( PKG, HADOOP_EXIT_META_CHECK_RESULT_NO_DATA_STREAM ), stepinfo ); //$NON-NLS-1$
      remarks.add( cr );
    } else {
      List<String> fieldnames = Arrays.asList( prev.getFieldNames() );
      HadoopExitMeta stepMeta = (HadoopExitMeta) stepinfo.getStepMetaInterface();
      if ( ( stepMeta.getOutKeyFieldname() == null ) || stepMeta.getOutValueFieldname() == null ) {
        cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR,
            BaseMessages.getString( PKG, HADOOP_EXIT_META_CHECK_RESULT_NO_SPECIFIED_FIELDS,
                prev.size() + "" ), stepinfo ); //$NON-NLS-1$ //$NON-NLS-2$
        remarks.add( cr );
      } else {
        if ( fieldnames.contains( stepMeta.getOutKeyFieldname() )
            && fieldnames.contains( stepMeta.getOutValueFieldname() ) ) {
          cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK,
              BaseMessages.getString( PKG, HADOOP_EXIT_META_CHECK_RESULT_STEP_RECEVING_DATA,
                  prev.size() + "" ), stepinfo ); //$NON-NLS-1$ //$NON-NLS-2$
          remarks.add( cr );
        } else {
          cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR,
              BaseMessages.getString( PKG, HADOOP_EXIT_META_CHECK_RESULT_NOT_RECEVING_SPECIFIED_FIELDS,
                  prev.size() + "" ), stepinfo ); //$NON-NLS-1$ //$NON-NLS-2$
          remarks.add( cr );
        }
      }
    }
  }

  /** Creates the runtime step instance backed by this metadata. */
  public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr,
                                Trans trans ) {
    return new HadoopExit( stepMeta, stepDataInterface, cnr, tr, trans );
  }

  public StepDataInterface getStepData() {
    return new HadoopExitData();
  }

  public String getOutKeyFieldname() {
    return outKeyFieldname;
  }

  public void setOutKeyFieldname( String arg ) {
    outKeyFieldname = arg;
  }

  public String getOutValueFieldname() {
    return outValueFieldname;
  }

  public void setOutValueFieldname( String arg ) {
    outValueFieldname = arg;
  }

  @Override
  public String getDialogClassName() {
    return DIALOG_NAME;
  }
}
stepanovdg/big-data-plugin
kettle-plugins/mapreduce/src/main/java/org/pentaho/big/data/kettle/plugins/mapreduce/step/exit/HadoopExitMeta.java
Java
apache-2.0
9,368
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.wicket;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Locale;
import java.util.MissingResourceException;

import org.apache.wicket.markup.html.WebPage;
import org.apache.wicket.markup.html.basic.Label;
import org.apache.wicket.markup.html.form.DropDownChoice;
import org.apache.wicket.markup.html.form.Form;
import org.apache.wicket.model.IModel;
import org.apache.wicket.model.Model;
import org.apache.wicket.model.PropertyModel;
import org.apache.wicket.resource.DummyApplication;
import org.apache.wicket.resource.loader.ComponentStringResourceLoader;
import org.apache.wicket.settings.IResourceSettings;
import org.apache.wicket.util.string.Strings;
import org.apache.wicket.util.tester.WicketTester;
import org.apache.wicket.util.value.ValueMap;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

/**
 * Test cases for the <code>Localizer</code> class.
 *
 * @author Chris Turner
 */
public class LocalizerTest extends Assert
{
	/**
	 * Page holding two drop-down choices inside a form, used to verify that
	 * per-component resource lookups do not leak between components.
	 */
	private static class MyMockPage extends WebPage
	{
		private static final long serialVersionUID = 1L;

		DropDownChoice<String> drop1;
		DropDownChoice<String> drop2;

		/**
		 * Construct.
		 */
		public MyMockPage()
		{
			final Form<Void> form = new Form<Void>("form");
			add(form);

			String[] choices = { "choice1", "choice2" };
			drop1 = new DropDownChoice<String>("drop1", Arrays.asList(choices));
			drop2 = new DropDownChoice<String>("drop2", Arrays.asList(choices));

			form.add(drop1);
			form.add(drop2);
		}
	}

	private WicketTester tester;
	private IResourceSettings settings;
	protected Localizer localizer;

	/**
	 * Starts a tester around a DummyApplication and caches its resource
	 * settings and localizer for the tests.
	 *
	 * @throws Exception
	 */
	@Before
	public void setUp() throws Exception
	{
		tester = new WicketTester(new DummyApplication());
		settings = tester.getApplication().getResourceSettings();
		localizer = tester.getApplication().getResourceSettings().getLocalizer();
	}

	@After
	public void tearDown() throws Exception
	{
		tester.destroy();
	}

	/**
	 * A key present in the resources resolves to its value, ignoring the default.
	 */
	@Test
	public void testGetStringValidString()
	{
		Assert.assertEquals("Expected string should be returned", "This is a test",
			localizer.getString("test.string", null, null, "DEFAULT"));
	}

	/**
	 * A missing key falls back to the supplied default when defaults are enabled.
	 */
	@Test
	public void testGetStringMissingStringReturnDefault()
	{
		settings.setUseDefaultOnMissingResource(true);
		Assert.assertEquals("Default string should be returned", "DEFAULT",
			localizer.getString("unknown.string", null, null, "DEFAULT"));
	}

	/**
	 * A missing key with no default yields the wrapped warning string.
	 */
	@Test
	public void testGetStringMissingStringNoDefault()
	{
		settings.setUseDefaultOnMissingResource(true);
		settings.setThrowExceptionOnMissingResource(false);

		Assert.assertEquals("Wrapped key should be returned on no default",
			"[Warning: Property for 'unknown.string' not found]",
			localizer.getString("unknown.string", null, null, null));
	}

	/**
	 * With defaults disabled and exceptions disabled, a missing key yields the
	 * wrapped warning string even though a default was supplied.
	 */
	@Test
	public void testGetStringMissingStringDoNotUseDefault()
	{
		settings.setUseDefaultOnMissingResource(false);
		settings.setThrowExceptionOnMissingResource(false);
		Assert.assertEquals("Wrapped key should be returned on not using default and no exception",
			"[Warning: Property for 'unknown.string' not found]",
			localizer.getString("unknown.string", null, null, "DEFAULT"));
	}

	/**
	 * With exceptions enabled and defaults disabled, a missing key throws
	 * MissingResourceException.
	 */
	@Test
	public void testGetStringMissingStringExceptionThrown()
	{
		settings.setUseDefaultOnMissingResource(false);
		settings.setThrowExceptionOnMissingResource(true);
		try
		{
			localizer.getString("unknown.string", null, null, "DEFAULT");
			Assert.fail("MissingResourceException expected");
		}
		catch (MissingResourceException e)
		{
			// Expected result
		}
	}

	/**
	 * Property placeholders in a resource value are substituted from the model.
	 */
	@Test
	public void testGetStringPropertySubstitution()
	{
		ValueMap vm = new ValueMap();
		vm.put("user", "John Doe");
		IModel<ValueMap> model = new Model<ValueMap>(vm);
		Assert.assertEquals("Property substitution should occur", "Welcome, John Doe",
			localizer.getString("test.substitute", null, model, null));
	}

	/**
	 * Localization must already work inside a component constructor (see MyLabel).
	 */
	@Test
	public void testInComponentConstructor()
	{
		new MyLabel("myLabel");
	}

	/**
	 * Unit test for bug number [1416582] Resource loading caches wrong.
	 */
	@Test
	public void testTwoComponents()
	{
		Session.get().setLocale(Locale.ENGLISH);
		MyMockPage page = new MyMockPage();
		Application.get()
			.getResourceSettings()
			.getStringResourceLoaders()
			.add(new ComponentStringResourceLoader());

		Localizer localizer = Application.get().getResourceSettings().getLocalizer();
		// Same key, different components: each must resolve its own value.
		assertEquals("value 1", localizer.getString("null", page.drop1));
		assertEquals("value 2", localizer.getString("null", page.drop2));

		// Switching locale must not serve stale cached values.
		Session.get().setLocale(new Locale("nl"));
		assertEquals("waarde 1", localizer.getString("null", page.drop1));
		assertEquals("waarde 2", localizer.getString("null", page.drop2));
	}

	/**
	 * Placeholder substitution pulls values from the supplied model; unknown
	 * keys still substitute into the supplied default.
	 */
	@Test
	public void testGetStringUseModel()
	{
		HashMap<String, String> model = new HashMap<String, String>();
		model.put("user", "juergen");

		Assert.assertEquals("Expected string should be returned", "Welcome, juergen",
			localizer.getString("test.substitute", null, new PropertyModel<String>(model, null),
				"DEFAULT {user}"));

		Assert.assertEquals("Expected string should be returned", "DEFAULT juergen",
			localizer.getString("test.substituteDoesNotExist", null, new PropertyModel<String>(
				model, null), "DEFAULT ${user}"));
	}

	/**
	 * See https://issues.apache.org/jira/browse/WICKET-1851
	 */
	@Test
	public void test_1851_1()
	{
		MyMockPage page = new MyMockPage();

		tester.getApplication().getResourceSettings().setThrowExceptionOnMissingResource(false);
		tester.getApplication().getResourceSettings().setUseDefaultOnMissingResource(false);

		// getStringIgnoreSettings bypasses the settings above.
		String option = localizer.getStringIgnoreSettings("dummy.null", page.drop1, null, "default");
		assertEquals(option, "default");

		option = localizer.getStringIgnoreSettings("dummy.null", page.drop1, null, null);
		assertNull(option);
		if (Strings.isEmpty(option))
		{
			option = localizer.getString("null", page.drop1, "CHOOSE_ONE");
		}
		assertEquals(option, "value 1");

		tester.getApplication().getResourceSettings().setThrowExceptionOnMissingResource(false);
		tester.getApplication().getResourceSettings().setUseDefaultOnMissingResource(false);

		option = localizer.getString("dummy.null", page.drop1, null, "default");
		assertEquals(option, "[Warning: Property for 'dummy.null' not found]");

		tester.getApplication().getResourceSettings().setThrowExceptionOnMissingResource(true);
		tester.getApplication().getResourceSettings().setUseDefaultOnMissingResource(true);

		option = localizer.getString("dummy.null", page.drop1, null, "default");
		assertEquals(option, "default");

		try
		{
			localizer.getString("dummy.null", page.drop1, null, null);
			assertTrue("Expected an exception to happen", false);
		}
		catch (MissingResourceException ex)
		{
			assertEquals(
				ex.getMessage(),
				"Unable to find property: 'dummy.null' for component: form:drop1 [class=org.apache.wicket.markup.html.form.DropDownChoice]");
		}
	}

	/**
	 * Test label.
	 */
	public static class MyLabel extends Label
	{
		private static final long serialVersionUID = 1L;

		/**
		 * Construct.
		 *
		 * @param id
		 */
		public MyLabel(final String id)
		{
			super(id);

			Localizer localizer = Application.get().getResourceSettings().getLocalizer();

			// should work properly in a component constructor (without parent)
			// as well
			Assert.assertEquals("Expected string should be returned", "This is a test",
				localizer.getString("test.string", this, "DEFAULT"));
		}
	}
}
martin-g/wicket-osgi
wicket-core/src/test/java/org/apache/wicket/LocalizerTest.java
Java
apache-2.0
8,399
# -*- coding: utf-8 -*- # # Copyright 2013, Qunar OPSDEV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: zhen.pei <zhen.pei@qunar.com> # Author: Jianing Yang <jianing.yang@qunar.com> # from qg.core import gettextutils gettextutils.install('testing', lazy=True) from oslo_config import cfg from testtools import TestCase import os CONF = cfg.CONF class TestGettext(TestCase): def setUp(self): # TODO(jianingy): 自动设置境变量 TESTING_LOCALEDIR, 测试用例里 locale # 用中文 localedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'locale')) cmd = ("msgfmt -o %s/zh_CN/LC_MESSAGES/testing.mo " "%s/zh_CN/LC_MESSAGES/testing.po") % (localedir, localedir) os.system(cmd) os.environ['TESTING_LOCALEDIR'] = localedir os.environ['LC_ALL'] = 'zh_CN.UTF-8' CONF.set_default('domain', 'testing', 'i18n') super(TestGettext, self).setUp() def test_gettext_without_translation(self): self.assertEqual(_('Hello'), 'Hello') def test_gettext_with_translation(self): self.assertEqual(_('Hello, world'), u'世界你好')
shadow4125/qg.core
tests/unit/test_gettextutils.py
Python
apache-2.0
1,757
# -*- coding: utf-8 -*-
# Copyright 2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv

from HTMLParser import HTMLParser
from optparse import make_option
import StringIO
from zipfile import ZipFile, ZipInfo

from django.core.management.base import BaseCommand, CommandError

from moocng.badges.models import Award
from moocng.courses.models import Course


class Command(BaseCommand):

    help = ("create a zip bundle with csv files with assigned badges per course")

    option_list = BaseCommand.option_list + (
        make_option('-f', '--filename',
                    action='store',
                    dest='filename',
                    default="",
                    help="Filename.zip to save the csv files"),
    )

    def error(self, message):
        """Write a line to stderr, replacing non-ascii characters."""
        self.stderr.write("%s\n" % message.encode("ascii", "replace"))

    def message(self, message):
        """Write a line to stdout, replacing non-ascii characters."""
        self.stdout.write("%s\n" % message.encode("ascii", "replace"))

    def handle(self, *args, **options):
        """Build <filename>.zip containing an awards.csv with one row per
        course: course name, completion-badge title, and the number of awards
        granted for that badge.

        Raises CommandError when no filename is given or no courses exist.
        """
        if not options["filename"]:
            raise CommandError("-f filename.zip is required")

        courses = Course.objects.all()
        if not courses:
            raise CommandError("Courses not found")

        # Normalize the target filename so it always ends in ".zip".
        if options["filename"].endswith(".zip"):
            self.filename = options["filename"]
        else:
            self.filename = "%s.zip" % options["filename"]

        h = HTMLParser()
        # Renamed from "zip", which shadowed the builtin of the same name.
        bundle = ZipFile(self.filename, mode="w")

        awards_file = StringIO.StringIO()
        awards_csv = csv.writer(awards_file, quoting=csv.QUOTE_ALL)
        awards_csv.writerow(["Course", "Badge", "Number of awards"])

        for course in courses:
            self.message("Calculating awards for course %s" % course.slug)
            awards_counter = 0
            badge_name = u''
            # Courses without a completion badge get an empty badge column
            # and a zero count. ("is not None" replaces "not ... is None".)
            if course.completion_badge is not None:
                awards_counter = Award.objects.filter(badge=course.completion_badge).count()
                badge_name = h.unescape(course.completion_badge.title.encode("ascii", "ignore"))
            awards_csv.writerow([
                h.unescape(course.name.encode("ascii", "ignore")),
                badge_name,
                awards_counter,
            ])

        # Rewind the in-memory CSV and store it as awards.csv in the bundle.
        awards_file.seek(0)
        bundle.writestr(ZipInfo("awards.csv"), awards_file.read())
        awards_file.close()
        bundle.close()
        self.message("Created %s file" % self.filename)
OpenMOOC/moocng
moocng/courses/management/commands/csv_awards_by_course.py
Python
apache-2.0
3,018
/* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/intstr" ) // describes the attributes of a scale subresource type ScaleSpec struct { // desired number of instances for the scaled object. Replicas int32 `json:"replicas,omitempty"` } // represents the current status of a scale subresource. type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas"` // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector map[string]string `json:"selector,omitempty"` } // +genclient=true,noMethods=true // represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. v1.ObjectMeta `json:"metadata,omitempty"` // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty"` // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. 
Status ScaleStatus `json:"status,omitempty"` } // Dummy definition type ReplicationControllerDummy struct { unversioned.TypeMeta `json:",inline"` } // SubresourceReference contains enough information to let you inspect or modify the referred subresource. type SubresourceReference struct { // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" Kind string `json:"kind,omitempty"` // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names Name string `json:"name,omitempty"` // API version of the referent APIVersion string `json:"apiVersion,omitempty"` // Subresource name of the referent Subresource string `json:"subresource,omitempty"` } type CPUTargetUtilization struct { // fraction of the requested CPU that should be utilized/used, // e.g. 70 means that 70% of the requested CPU should be in use. TargetPercentage int32 `json:"targetPercentage"` } // Alpha-level support for Custom Metrics in HPA (as annotations). type CustomMetricTarget struct { // Custom Metric name. Name string `json:"name"` // Custom Metric value (average). TargetValue resource.Quantity `json:"value"` } type CustomMetricTargetList struct { Items []CustomMetricTarget `json:"items"` } type CustomMetricCurrentStatus struct { // Custom Metric name. Name string `json:"name"` // Custom Metric value (average). CurrentValue resource.Quantity `json:"value"` } type CustomMetricCurrentStatusList struct { Items []CustomMetricCurrentStatus `json:"items"` } // specification of a horizontal pod autoscaler. type HorizontalPodAutoscalerSpec struct { // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, // and will set the desired number of pods by modifying its spec. ScaleRef SubresourceReference `json:"scaleRef"` // lower limit for the number of pods that can be set by the autoscaler, default 1. 
MinReplicas *int32 `json:"minReplicas,omitempty"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. MaxReplicas int32 `json:"maxReplicas"` // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified it defaults to the target CPU utilization at 80% of the requested resources. CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty"` } // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { // most recent generation observed by this autoscaler. ObservedGeneration *int64 `json:"observedGeneration,omitempty"` // last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` // current number of replicas of pods managed by this autoscaler. CurrentReplicas int32 `json:"currentReplicas"` // desired number of replicas of pods managed by this autoscaler. DesiredReplicas int32 `json:"desiredReplicas"` // current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` } // +genclient=true // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty"` // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` // current information about the autoscaler. 
Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` } // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. unversioned.ListMeta `json:"metadata,omitempty"` // list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items"` } // +genclient=true // A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource // types to the API. It consists of one or more Versions of the api. type ThirdPartyResource struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata v1.ObjectMeta `json:"metadata,omitempty"` // Description is the description of this object. Description string `json:"description,omitempty"` // Versions are versions for this third party object Versions []APIVersion `json:"versions,omitempty"` } // ThirdPartyResourceList is a list of ThirdPartyResources. type ThirdPartyResourceList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of ThirdPartyResources. Items []ThirdPartyResource `json:"items"` } // An APIVersion represents a single concrete version of an object model. type APIVersion struct { // Name of this version (e.g. 'v1'). Name string `json:"name,omitempty"` // The API group to add this object into, default 'experimental'. APIGroup string `json:"apiGroup,omitempty"` } // An internal object, used for versioned storage in etcd. Not exposed to the end user. type ThirdPartyResourceData struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata. v1.ObjectMeta `json:"metadata,omitempty"` // Data is the raw JSON data for this data. Data []byte `json:"data,omitempty"` } // +genclient=true // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata. 
v1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Deployment. Spec DeploymentSpec `json:"spec,omitempty"` // Most recently observed status of the Deployment. Status DeploymentStatus `json:"status,omitempty"` } // DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { // Number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. Replicas *int32 `json:"replicas,omitempty"` // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. Selector *LabelSelector `json:"selector,omitempty"` // Template describes the pods that will be created. Template v1.PodTemplateSpec `json:"template"` // The deployment strategy to use to replace existing pods with new ones. Strategy DeploymentStrategy `json:"strategy,omitempty"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) MinReadySeconds int32 `json:"minReadySeconds,omitempty"` // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` // Indicates that the deployment is paused and will not be processed by the // deployment controller. Paused bool `json:"paused,omitempty"` // The config this deployment is rolling back to. Will be cleared after rollback is done. RollbackTo *RollbackConfig `json:"rollbackTo,omitempty"` } // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { unversioned.TypeMeta `json:",inline"` // Required: This must match the Name of a deployment. 
Name string `json:"name"` // The annotations to be updated to a deployment UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty"` // The config of this deployment rollback. RollbackTo RollbackConfig `json:"rollbackTo"` } type RollbackConfig struct { // The revision to rollback to. If set to 0, rollbck to the last revision. Revision int64 `json:"revision,omitempty"` } const ( // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added // to existing RCs (and label key that is added to its pods) to prevent the existing RCs // to select new pods (and old pods being select by new RC). DefaultDeploymentUniqueLabelKey string = "pod-template-hash" ) // DeploymentStrategy describes how to replace existing pods with new ones. type DeploymentStrategy struct { // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. Type DeploymentStrategyType `json:"type,omitempty"` // Rolling update config params. Present only if DeploymentStrategyType = // RollingUpdate. //--- // TODO: Update this to follow our convention for oneOf, whatever we decide it // to be. RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty"` } type DeploymentStrategyType string const ( // Kill all existing pods before creating new ones. RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" ) // Spec to control the desired behavior of rolling update. type RollingUpdateDeployment struct { // The maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. // This can not be 0 if MaxSurge is 0. // By default, a fixed value of 1 is used. 
// Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods // immediately when the rolling update starts. Once new pods are ready, old RC // can be scaled down further, followed by scaling up the new RC, ensuring // that the total number of pods available at all times during the update is at // least 70% of desired pods. MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` // The maximum number of pods that can be scheduled above the desired number of // pods. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // This can not be 0 if MaxUnavailable is 0. // Absolute number is calculated from percentage by rounding up. // By default, a value of 1 is used. // Example: when this is set to 30%, the new RC can be scaled up immediately when // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is atmost 130% of desired pods. MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { // The generation observed by the deployment controller. ObservedGeneration int64 `json:"observedGeneration,omitempty"` // Total number of non-terminated pods targeted by this deployment (their labels match the selector). Replicas int32 `json:"replicas,omitempty"` // Total number of non-terminated pods targeted by this deployment that have the desired template spec. UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. AvailableReplicas int32 `json:"availableReplicas,omitempty"` // Total number of unavailable pods targeted by this deployment. 
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"` } // DeploymentList is a list of Deployments. type DeploymentList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of Deployments. Items []Deployment `json:"items"` } // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. /* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out. type DaemonSetUpdateStrategy struct { // Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate. Type DaemonSetUpdateStrategyType `json:"type,omitempty"` // Rolling update config params. Present only if DaemonSetUpdateStrategy = // RollingUpdate. //--- // TODO: Update this to follow our convention for oneOf, whatever we decide it // to be. Same as DeploymentStrategy.RollingUpdate. RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"` } type DaemonSetUpdateStrategyType string const ( // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" ) // Spec to control the desired behavior of daemon set rolling update. type RollingUpdateDaemonSet struct { // The maximum number of DaemonSet pods that can be unavailable during the // update. Value can be an absolute number (ex: 5) or a percentage of total // number of DaemonSet pods at the start of the update (ex: 10%). Absolute // number is calculated from percentage by rounding up. // This cannot be 0. // Default value is 1. // Example: when this is set to 30%, 30% of the currently running DaemonSet // pods can be stopped for an update at any given time. 
The update starts // by stopping at most 30% of the currently running DaemonSet pods and then // brings up new DaemonSet pods in their place. Once the new pods are ready, // it then proceeds onto other DaemonSet pods, thus ensuring that at least // 70% of original number of DaemonSet pods are available at all times // during the update. MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` // Minimum number of seconds for which a newly created DaemonSet pod should // be ready without any of its container crashing, for it to be considered // available. Defaults to 0 (pod will be considered available as soon as it // is ready). MinReadySeconds int32 `json:"minReadySeconds,omitempty"` } */ // DaemonSetSpec is the specification of a daemon set. type DaemonSetSpec struct { // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector *LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template Template v1.PodTemplateSpec `json:"template"` // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out. // Update strategy to replace existing DaemonSet pods with new pods. 
UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"` // Label key that is added to DaemonSet pods to distinguish between old and // new pod templates during DaemonSet update. // Users can set this to an empty string to indicate that the system should // not add any label. If unspecified, system uses // DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash"). // Value of this key is hash of DaemonSetSpec.PodTemplateSpec. // No label is added if this is set to empty string. UniqueLabelKey *string `json:"uniqueLabelKey,omitempty"` */ } const ( // DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added // to daemon set pods to distinguish between old and new pod templates during // DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information. DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash" ) // DaemonSetStatus represents the current status of a daemon set. type DaemonSetStatus struct { // CurrentNumberScheduled is the number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. // More info: http://releases.k8s.io/HEAD/docs/admin/daemon.md CurrentNumberScheduled int32 `json:"currentNumberScheduled"` // NumberMisscheduled is the number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. // More info: http://releases.k8s.io/HEAD/docs/admin/daemon.md NumberMisscheduled int32 `json:"numberMisscheduled"` // DesiredNumberScheduled is the total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). // More info: http://releases.k8s.io/HEAD/docs/admin/daemon.md DesiredNumberScheduled int32 `json:"desiredNumberScheduled"` } // +genclient=true // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired behavior of this daemon set. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Spec DaemonSetSpec `json:"spec,omitempty"` // Status is the current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Status DaemonSetStatus `json:"status,omitempty"` } // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is a list of daemon sets. Items []DaemonSet `json:"items"` } // ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. type ThirdPartyResourceDataList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of ThirdpartyResourceData. Items []ThirdPartyResourceData `json:"items"` } // +genclient=true // Job represents the configuration of a single job. type Job struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty"` // Spec is a structure defining the expected behavior of a job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Spec JobSpec `json:"spec,omitempty"` // Status is a structure describing current status of a job. 
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Status JobStatus `json:"status,omitempty"` } // JobList is a collection of jobs. type JobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of Job. Items []Job `json:"items"` } // JobSpec describes how the job execution will look like. type JobSpec struct { // Parallelism specifies the maximum desired number of pods the job should // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md Parallelism *int32 `json:"parallelism,omitempty"` // Completions specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. Completions *int32 `json:"completions,omitempty"` // Optional duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` // Selector is a label query over pods that should match the pod count. // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector *LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created when // executing a job. 
// More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md Template v1.PodTemplateSpec `json:"template"` } // JobStatus represents the current state of a Job. type JobStatus struct { // Conditions represent the latest available observations of an object's current state. // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` // StartTime represents time when the job was acknowledged by the Job Manager. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. StartTime *unversioned.Time `json:"startTime,omitempty"` // CompletionTime represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. CompletionTime *unversioned.Time `json:"completionTime,omitempty"` // Active is the number of actively running pods. Active int32 `json:"active,omitempty"` // Succeeded is the number of pods which reached Phase Succeeded. Succeeded int32 `json:"succeeded,omitempty"` // Failed is the number of pods which reached Phase Failed. Failed int32 `json:"failed,omitempty"` } type JobConditionType string // These are valid conditions of a job. const ( // JobComplete means the job has completed its execution. JobComplete JobConditionType = "Complete" // JobFailed means the job has failed its execution. JobFailed JobConditionType = "Failed" ) // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. Type JobConditionType `json:"type"` // Status of the condition, one of True, False, Unknown. Status v1.ConditionStatus `json:"status"` // Last time the condition was checked. LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` // Last time the condition transit from one status to another. 
LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` // (brief) reason for the condition's last transition. Reason string `json:"reason,omitempty"` // Human readable message indicating details about last transition. Message string `json:"message,omitempty"` } // +genclient=true // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. type Ingress struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty"` // Spec is the desired state of the Ingress. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Spec IngressSpec `json:"spec,omitempty"` // Status is the current state of the Ingress. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Status IngressStatus `json:"status,omitempty"` } // IngressList is a collection of Ingress. type IngressList struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of Ingress. Items []Ingress `json:"items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { // A default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. Backend *IngressBackend `json:"backend,omitempty"` // TLS configuration. Currently the Ingress only supports a single TLS // port, 443, and assumes TLS termination. 
If multiple members of this // list specify different hosts, they will be multiplexed on the same // port according to the hostname specified through the SNI TLS extension. TLS []IngressTLS `json:"tls,omitempty"` // A list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. Rules []IngressRule `json:"rules,omitempty"` // TODO: Add the ability to specify load-balancer IP through claims } // IngressTLS describes the transport layer security associated with an Ingress. type IngressTLS struct { // Hosts are a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. Hosts []string `json:"hosts,omitempty"` // SecretName is the name of the secret used to terminate SSL traffic on 443. // Field is left optional to allow SSL routing based on SNI hostname alone. // If the SNI host in a listener conflicts with the "Host" header field used // by an IngressRule, the SNI host is used for termination and value of the // Host header is used for routing. SecretName string `json:"secretName,omitempty"` // TODO: Consider specifying different modes of termination, protocols etc. } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { // LoadBalancer contains the current status of the load-balancer. LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty"` } // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { // Host is the fully qualified domain name of a network host, as defined // by RFC 3986. 
Note the following deviations from the "host" part of the // URI as defined in the RFC: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the // IP in the Spec of the parent Ingress. // 2. The `:` delimiter is not respected because ports are not allowed. // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. // Incoming requests are matched against the host before the IngressRuleValue. // If the host is unspecified, the Ingress routes all traffic based on the // specified IngressRuleValue. Host string `json:"host,omitempty"` // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. IngressRuleValue `json:",inline,omitempty"` } // IngressRuleValue represents a rule to apply against incoming requests. If the // rule is satisfied, the request is routed to the specified backend. Currently // mixing different types of rules in a single Ingress is disallowed, so exactly // one of the following must be set. type IngressRuleValue struct { //TODO: // 1. Consider renaming this resource and the associated rules so they // aren't tied to Ingress. They can be used to route intra-cluster traffic. // 2. Consider adding fields for ingress-type specific global options // usable by a loadbalancer, like http keep-alive. HTTP *HTTPIngressRuleValue `json:"http,omitempty"` } // HTTPIngressRuleValue is a list of http selectors pointing to backends. // In the example: http://<host>/<path>?<searchpart> -> backend where // where parts of the url correspond to RFC 3986, this resource will be used // to match against everything after the last '/' and before the first '?' // or '#'. 
type HTTPIngressRuleValue struct {
	// A collection of paths that map requests to backends.
	Paths []HTTPIngressPath `json:"paths"`
	// TODO: Consider adding fields for ingress-type specific global
	// options usable by a loadbalancer, like http keep-alive.
}

// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
// the path are forwarded to the backend.
type HTTPIngressPath struct {
	// Path is an extended POSIX regex as defined by IEEE Std 1003.1,
	// (i.e. this follows the egrep/unix syntax, not the perl syntax)
	// matched against the path of an incoming request. Currently it can
	// contain characters disallowed from the conventional "path"
	// part of a URL as defined by RFC 3986. Paths must begin with
	// a '/'. If unspecified, the path defaults to a catch all sending
	// traffic to the backend.
	Path string `json:"path,omitempty"`

	// Backend defines the referenced service endpoint to which the traffic
	// will be forwarded to.
	Backend IngressBackend `json:"backend"`
}

// IngressBackend describes all endpoints for a given service and port.
type IngressBackend struct {
	// Specifies the name of the referenced service.
	ServiceName string `json:"serviceName"`

	// Specifies the port of the referenced service.
	ServicePort intstr.IntOrString `json:"servicePort"`
}

// NodeResource names a node resource metric tracked by the cluster autoscaler.
type NodeResource string

const (
	// Percentage of node's CPUs that is currently used.
	CpuConsumption NodeResource = "CpuConsumption"

	// Percentage of node's CPUs that is currently requested for pods.
	CpuRequest NodeResource = "CpuRequest"

	// Percentage of node's memory that is currently used.
	MemConsumption NodeResource = "MemConsumption"

	// Percentage of node's memory that is currently requested for pods.
	MemRequest NodeResource = "MemRequest"
)

// NodeUtilization describes what percentage of a particular resource is used on a node.
type NodeUtilization struct {
	Resource NodeResource `json:"resource"`

	// The accepted values are from 0 to 1.
	Value float64 `json:"value"`
}

// Configuration of the Cluster Autoscaler
type ClusterAutoscalerSpec struct {
	// Minimum number of nodes that the cluster should have.
	MinNodes int32 `json:"minNodes"`

	// Maximum number of nodes that the cluster should have.
	MaxNodes int32 `json:"maxNodes"`

	// Target average utilization of the cluster nodes. New nodes will be added if one of the
	// targets is exceeded. Cluster size will be decreased if the current utilization is too low
	// for all targets.
	TargetUtilization []NodeUtilization `json:"target"`
}

// ClusterAutoscaler holds the configuration for the cluster autoscaler.
type ClusterAutoscaler struct {
	unversioned.TypeMeta `json:",inline"`

	// Standard object's metadata.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
	// For now (experimental api) it is required that the name is set to "ClusterAutoscaler" and namespace is "default".
	v1.ObjectMeta `json:"metadata,omitempty"`

	// Spec defines the desired behavior of this cluster autoscaler.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
	Spec ClusterAutoscalerSpec `json:"spec,omitempty"`
}

// There will be just one (or none) ClusterAutoscaler.
type ClusterAutoscalerList struct {
	unversioned.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
	unversioned.ListMeta `json:"metadata,omitempty"`

	Items []ClusterAutoscaler `json:"items"`
}

// ExportOptions is the query options to the standard REST get call.
type ExportOptions struct {
	unversioned.TypeMeta `json:",inline"`
	// Should this value be exported. Export strips fields that a user can not specify.
	Export bool `json:"export"`
	// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
	Exact bool `json:"exact"`
}

// ListOptions is the query options to a standard REST list call.
type ListOptions struct {
	unversioned.TypeMeta `json:",inline"`

	// A selector to restrict the list of returned objects by their labels.
	// Defaults to everything.
	LabelSelector string `json:"labelSelector,omitempty"`

	// A selector to restrict the list of returned objects by their fields.
	// Defaults to everything.
	FieldSelector string `json:"fieldSelector,omitempty"`

	// Watch for changes to the described resources and return them as a stream of
	// add, update, and remove notifications. Specify resourceVersion.
	Watch bool `json:"watch,omitempty"`

	// When specified with a watch call, shows changes that occur after that particular version of a resource.
	// Defaults to changes from the beginning of history.
	ResourceVersion string `json:"resourceVersion,omitempty"`

	// Timeout for the list/watch call.
	TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"`
}

// A label selector is a label query over a set of resources. The result of matchLabels and
// matchExpressions are ANDed. An empty label selector matches all objects. A null
// label selector matches no objects.
type LabelSelector struct {
	// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
	// map is equivalent to an element of matchExpressions, whose key field is "key", the
	// operator is "In", and the values array contains only "value". The requirements are ANDed.
	MatchLabels map[string]string `json:"matchLabels,omitempty"`
	// matchExpressions is a list of label selector requirements. The requirements are ANDed.
	MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty"`
}

// A label selector requirement is a selector that contains values, a key, and an operator that
// relates the key and values.
type LabelSelectorRequirement struct {
	// key is the label key that the selector applies to.
	Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
	// operator represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists and DoesNotExist.
	Operator LabelSelectorOperator `json:"operator"`
	// values is an array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. This array is replaced during a strategic
	// merge patch.
	Values []string `json:"values,omitempty"`
}

// A label selector operator is the set of operators that can be used in a selector requirement.
type LabelSelectorOperator string

const (
	LabelSelectorOpIn           LabelSelectorOperator = "In"
	LabelSelectorOpNotIn        LabelSelectorOperator = "NotIn"
	LabelSelectorOpExists       LabelSelectorOperator = "Exists"
	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
)

// +genclient=true

// ReplicaSet represents the configuration of a ReplicaSet.
type ReplicaSet struct {
	unversioned.TypeMeta `json:",inline"`

	// If the Labels of a ReplicaSet are empty, they are defaulted to
	// be the same as the Pod(s) that the ReplicaSet manages.
	// Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
	v1.ObjectMeta `json:"metadata,omitempty"`

	// Spec defines the specification of the desired behavior of the ReplicaSet.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
	Spec ReplicaSetSpec `json:"spec,omitempty"`

	// Status is the most recently observed status of the ReplicaSet.
	// This data may be out of date by some window of time.
	// Populated by the system.
	// Read-only.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
	Status ReplicaSetStatus `json:"status,omitempty"`
}

// ReplicaSetList is a collection of ReplicaSets.
type ReplicaSetList struct {
	unversioned.TypeMeta `json:",inline"`

	// Standard list metadata.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
	unversioned.ListMeta `json:"metadata,omitempty"`

	// List of ReplicaSets.
	// More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md
	Items []ReplicaSet `json:"items"`
}

// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpec struct {
	// Replicas is the number of desired replicas.
	// This is a pointer to distinguish between explicit zero and unspecified.
	// Defaults to 1.
	// More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
	Replicas *int32 `json:"replicas,omitempty"`

	// Selector is a label query over pods that should match the replica count.
	// If the selector is empty, it is defaulted to the labels present on the pod template.
	// Label keys and values that must match in order to be controlled by this replica set.
	// More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
	Selector *LabelSelector `json:"selector,omitempty"`

	// Template is the object that describes the pod that will be created if
	// insufficient replicas are detected.
	// More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
	Template *v1.PodTemplateSpec `json:"template,omitempty"`
}

// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatus struct {
	// Replicas is the most recently observed number of replicas.
	// More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
	Replicas int32 `json:"replicas"`

	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}

// Pod Security Policy governs the ability to make requests that affect the Security Context
// that will be applied to a pod and container.
type PodSecurityPolicy struct {
	unversioned.TypeMeta `json:",inline"`

	// Standard object's metadata.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
	v1.ObjectMeta `json:"metadata,omitempty"`

	// spec defines the policy enforced.
	Spec PodSecurityPolicySpec `json:"spec,omitempty"`
}

// Pod Security Policy Spec defines the policy enforced.
type PodSecurityPolicySpec struct {
	// privileged determines if a pod can request to be run as privileged.
	Privileged bool `json:"privileged,omitempty"`

	// capabilities is a list of capabilities that can be added.
	Capabilities []v1.Capability `json:"capabilities,omitempty"`

	// volumes is a white list of allowed volume plugins. Empty indicates that all plugins
	// may be used.
	Volumes []FSType `json:"volumes,omitempty"`

	// hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
	HostNetwork bool `json:"hostNetwork,omitempty"`

	// hostPorts determines which host port ranges are allowed to be exposed.
	HostPorts []HostPortRange `json:"hostPorts,omitempty"`

	// hostPID determines if the policy allows the use of HostPID in the pod spec.
	HostPID bool `json:"hostPID,omitempty"`

	// hostIPC determines if the policy allows the use of HostIPC in the pod spec.
	HostIPC bool `json:"hostIPC,omitempty"`

	// seLinuxContext is the strategy that will dictate the allowable labels that may be set.
	SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty"`

	// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
	RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty"`
}

// FS Type gives strong typing to different file systems that are used by volumes.
type FSType string

var (
	HostPath              FSType = "hostPath"
	EmptyDir              FSType = "emptyDir"
	GCEPersistentDisk     FSType = "gcePersistentDisk"
	AWSElasticBlockStore  FSType = "awsElasticBlockStore"
	GitRepo               FSType = "gitRepo"
	Secret                FSType = "secret"
	NFS                   FSType = "nfs"
	ISCSI                 FSType = "iscsi"
	Glusterfs             FSType = "glusterfs"
	PersistentVolumeClaim FSType = "persistentVolumeClaim"
	RBD                   FSType = "rbd"
	Cinder                FSType = "cinder"
	CephFS                FSType = "cephFS"
	DownwardAPI           FSType = "downwardAPI"
	FC                    FSType = "fc"
)

// Host Port Range defines a range of host ports that will be enabled by a policy
// for pods to use. It requires both the start and end to be defined.
type HostPortRange struct {
	// min is the start of the range, inclusive.
	Min int32 `json:"min"`
	// max is the end of the range, inclusive.
	Max int32 `json:"max"`
}

// SELinux Context Strategy Options defines the strategy type and any options used to create the strategy.
type SELinuxContextStrategyOptions struct {
	// type is the strategy that will dictate the allowable labels that may be set.
	Type SELinuxContextStrategy `json:"type"`
	// seLinuxOptions required to run as; required for MustRunAs
	// More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context
	SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty"`
}

// SELinux Context Strategy Type denotes strategy types for generating SELinux options for a
// Security Context.
type SELinuxContextStrategy string

const (
	// container must have SELinux labels of X applied.
	SELinuxStrategyMustRunAs SELinuxContextStrategy = "MustRunAs"
	// container may make requests for any SELinux context labels.
	SELinuxStrategyRunAsAny SELinuxContextStrategy = "RunAsAny"
)

// Run As User Strategy Options defines the strategy type and any options used to create the strategy.
type RunAsUserStrategyOptions struct {
	// type is the strategy that will dictate the allowable RunAsUser values that may be set.
	Type RunAsUserStrategy `json:"type"`
	// Ranges are the allowed ranges of uids that may be used.
	Ranges []IDRange `json:"ranges,omitempty"`
}

// ID Range provides a min/max of an allowed range of IDs.
type IDRange struct {
	// Min is the start of the range, inclusive.
	Min int64 `json:"min"`
	// Max is the end of the range, inclusive.
	Max int64 `json:"max"`
}

// Run As User Strategy Type denotes strategy types for generating RunAsUser values for a
// Security Context.
type RunAsUserStrategy string

const (
	// container must run as a particular uid.
	RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
	// container must run as a non-root uid.
	RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
	// container may make requests for any uid.
	RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
)

// Pod Security Policy List is a list of PodSecurityPolicy objects.
type PodSecurityPolicyList struct {
	unversioned.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: http://docs.k8s.io/api-conventions.md#metadata
	unversioned.ListMeta `json:"metadata,omitempty"`

	// Items is a list of schema objects.
	Items []PodSecurityPolicy `json:"items"`
}
satnam6502/kubernetes
pkg/apis/extensions/v1beta1/types.go
GO
apache-2.0
46,181
/* * Copyright (C) 2015-2017 NS Solutions Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.htmlhifive.pitalium.it.assertion.partialPage; import static org.junit.Assert.*; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import com.htmlhifive.pitalium.core.model.ScreenshotArgument; import com.htmlhifive.pitalium.it.assertion.PtlItAssertionTestBase; /** * 条件が異なる場合の比較テスト(要素のボーダーが異なる) */ public class CompareDifferentBorderTest extends PtlItAssertionTestBase { @Rule public ExpectedException expectedException = ExpectedException.none(); /** * 同一要素のボーダーの幅を変更して比較する。 * * @ptl.expect 差分が発生すること。 */ @Test public void compareSameElementWhichHasDifferentBorderWidth() throws Exception { openBasicTextPage(); if (isRunTest()) { driver.executeJavaScript("" + "var element = document.getElementById('textColumn0');" + "element.style.border = '5px black solid';"); } else { driver.executeJavaScript("" + "var element = document.getElementById('textColumn0');" + "element.style.border = '1px black solid';"); } ScreenshotArgument arg = ScreenshotArgument.builder("s").addNewTargetById("textColumn0").build(); if (isRunTest()) { expectedException.expect(AssertionError.class); assertionView.assertView(arg); fail(); return; } assertionView.assertView(arg); } /** * 同一要素からボーダーを削除して比較する。 * * @ptl.expect 差分が発生すること。 */ @Test public void compareSameElementWhichHasNoBorderWidth() throws Exception { openBasicTextPage(); if (isRunTest()) { 
driver.executeJavaScript("" + "var element = document.getElementById('textColumn0');" + "element.style.border = '0px';"); } else { driver.executeJavaScript("" + "var element = document.getElementById('textColumn0');" + "element.style.border = '1px black solid';"); } ScreenshotArgument arg = ScreenshotArgument.builder("s").addNewTargetById("textColumn0").build(); if (isRunTest()) { expectedException.expect(AssertionError.class); assertionView.assertView(arg); fail(); return; } assertionView.assertView(arg); } }
hifive/hifive-pitalium
pitalium/src/test/java/com/htmlhifive/pitalium/it/assertion/partialPage/CompareDifferentBorderTest.java
Java
apache-2.0
2,826
/*
 * Copyright 2019 EPAM Systems
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.epam.ta.reportportal.util;

import org.springframework.stereotype.Component;

import java.time.LocalDateTime;

/**
 * Class to ease writing/testing of date-based logic.
 *
 * @author Dzianis_Shybeka
 */
@Component
public class DateTimeProvider {

	/**
	 * Returns the current date-time, equivalent to {@link LocalDateTime#now()}.
	 * Exposed as an instance method so tests can substitute a fixed clock by
	 * mocking this bean instead of the static call (see class doc).
	 *
	 * @return the current {@link LocalDateTime}
	 */
	public LocalDateTime localDateTimeNow() {
		return LocalDateTime.now();
	}
}
reportportal/commons-dao
src/main/java/com/epam/ta/reportportal/util/DateTimeProvider.java
Java
apache-2.0
930
import styled from 'styled-components';

import Hamburger from './Hamburger';
import {
  BREAKPOINT_SHOW_HAMBURGER,
  SIDENAV_SIZE,
  HAMBURER_SIZE,
} from './constants';

// Hamburger toggle button positioned at the top edge of the side nav
// (absolutely placed at left: SIDENAV_SIZE). While `active` it is shifted
// 70px back to the left; above BREAKPOINT_SHOW_HAMBURGER the button is
// hidden entirely.
// NOTE(review): the -70px offset presumably corresponds to HAMBURER_SIZE
// (70?) — confirm against ./constants before deduplicating.
// NOTE(review): `HAMBURER_SIZE` looks like a typo of HAMBURGER_SIZE, but the
// name is declared in ./constants and cannot be fixed from this file alone.
export default styled(Hamburger)`
  position: absolute;
  top: 0;
  left: ${SIDENAV_SIZE}px;
  transform: ${(props) => props.active ? 'translateX(-70px)' : 'translateX(0)'};
  width: ${HAMBURER_SIZE}px;
  height: ${HAMBURER_SIZE}px;
  z-index: 200;

  @media (min-width: ${BREAKPOINT_SHOW_HAMBURGER}px) {
    visibility: hidden;
  }
`;
spearwolf/blitpunk
packages/kitchen-sink/src/AppShell/SideNavHamburger.js
JavaScript
apache-2.0
512
/*
 * Copyright 2013 MovingBlocks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.terasology.input.binds.inventory;

import org.terasology.input.DefaultBinding;
import org.terasology.input.InputType;
import org.terasology.input.Keyboard;
import org.terasology.input.RegisterBindButton;

/**
 * Bind button for the third toolbar slot (zero-based slot index 2),
 * bound to the "3" key by default.
 */
@RegisterBindButton(id = "toolbarSlot2", description = "Toolbar Slot 3")
@DefaultBinding(type = InputType.KEY, id = Keyboard.KeyId.KEY_3)
public class ToolbarSlot2Button extends ToolbarSlotButton {

    // Passes the zero-based slot index to the shared toolbar-slot base class.
    public ToolbarSlot2Button() {
        super(2);
    }
}
sceptross/Terasology
engine/src/main/java/org/terasology/input/binds/inventory/ToolbarSlot2Button.java
Java
apache-2.0
1,077
package org.vaadin.addon.vol3.client.interaction;

import com.vaadin.shared.communication.ServerRpc;

import java.util.List;

/**
 * Server rpc interface for the select interaction
 */
public interface OLSelectInteractionSRPC extends ServerRpc{

    /**
     * Invoked from the client side when the selection changes.
     *
     * @param selectedFeatures the currently selected features, as string
     *                         identifiers (assumed to be feature ids — confirm
     *                         against the client-side connector)
     */
    public void updateSelection(List<String> selectedFeatures);
}
grandeemme/v-ol3
v-ol3/src/main/java/org/vaadin/addon/vol3/client/interaction/OLSelectInteractionSRPC.java
Java
apache-2.0
311
package api /* import ( "net" "net/http" "reflect" "sync" "testing" "github.com/Dataman-Cloud/swan/store" "github.com/gorilla/mux" ) func TestNewServer(t *testing.T) { fakeCfg := &Config{ Advertise: "hello", LogLevel: "debug", } type args struct { cfg *Config l net.Listener leader string driver Driver db store.Store } tests := []struct { name string args args want *Server }{ { name: "test", args: args{ cfg: fakeCfg, }, want: &Server{ cfg: fakeCfg, server: &http.Server{ Handler: nil, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := NewServer(tt.args.cfg, tt.args.l, tt.args.driver, tt.args.db); !reflect.DeepEqual(got.cfg, tt.want.cfg) { t.Errorf("NewServer() = %v, want %v", got, tt.want) } }) } } func TestServer_Run(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } tests := []struct { name string fields fields wantErr bool }{ //TODO: Do TestServer_Run } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } if err := s.Run(); (err != nil) != tt.wantErr { t.Errorf("Server.Run() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestServer_Shutdown(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } tests := []struct { name string fields fields wantErr bool }{ // TODO: Add test cases. 
} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } if err := s.Shutdown(); (err != nil) != tt.wantErr { t.Errorf("Server.Shutdown() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestServer_Stop(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } tests := []struct { name string fields fields wantErr bool }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } if err := s.Stop(); (err != nil) != tt.wantErr { t.Errorf("Server.Stop() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestServer_Reload(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } tests := []struct { name string fields fields wantErr bool }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } if err := s.Reload(); (err != nil) != tt.wantErr { t.Errorf("Server.Reload() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestServer_UpdateLeader(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } type args struct { leader string } tests := []struct { name string fields fields args args }{ // TODO: Add test cases. 
} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } s.UpdateLeader(tt.args.leader) }) } } func TestServer_GetLeader(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } tests := []struct { name string fields fields want string }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } if got := s.GetLeader(); got != tt.want { t.Errorf("Server.GetLeader() = %v, want %v", got, tt.want) } }) } } func TestServer_createMux(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } tests := []struct { name string fields fields want *mux.Router }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } if got := s.createMux(); !reflect.DeepEqual(got, tt.want) { t.Errorf("Server.createMux() = %v, want %v", got, tt.want) } }) } } func TestServer_enableCORS(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } type args struct { w http.ResponseWriter } tests := []struct { name string fields fields args args }{ // TODO: Add test cases. 
} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } s.enableCORS(tt.args.w) }) } } func Test_profilerSetup(t *testing.T) { type args struct { r *mux.Router path string } tests := []struct { name string args args }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { profilerSetup(tt.args.r, tt.args.path) }) } } func TestServer_makeHTTPHandler(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } type args struct { handler HandlerFunc } tests := []struct { name string fields fields args args want http.HandlerFunc }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } if got := s.makeHTTPHandler(tt.args.handler); !reflect.DeepEqual(got, tt.want) { t.Errorf("Server.makeHTTPHandler() = %v, want %v", got, tt.want) } }) } } func TestServer_forwardRequest(t *testing.T) { type fields struct { cfg *Config listener net.Listener leader string server *http.Server driver Driver db store.Store Mutex sync.Mutex } type args struct { w http.ResponseWriter r *http.Request } tests := []struct { name string fields fields args args }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &Server{ cfg: tt.fields.cfg, listener: tt.fields.listener, leader: tt.fields.leader, server: tt.fields.server, driver: tt.fields.driver, db: tt.fields.db, Mutex: tt.fields.Mutex, } s.forwardRequest(tt.args.w, tt.args.r) }) } } */
Dataman-Cloud/swan
api/server_test.go
GO
apache-2.0
8,730
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
*  http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/

#include <aws/ds/model/CreateMicrosoftADResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>

#include <utility>

using namespace Aws::DirectoryService::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;

// Default constructor: members are left default-constructed (no directory id).
CreateMicrosoftADResult::CreateMicrosoftADResult()
{
}

// Convenience constructor: immediately populates the result from the service
// response by delegating to operator=.
CreateMicrosoftADResult::CreateMicrosoftADResult(const AmazonWebServiceResult<JsonValue>& result)
{
  *this = result;
}

// Populates this result from the JSON payload of the service response.
// "DirectoryId" is copied only when present in the payload; otherwise the
// member keeps its current value.
CreateMicrosoftADResult& CreateMicrosoftADResult::operator =(const AmazonWebServiceResult<JsonValue>& result)
{
  const JsonValue& jsonValue = result.GetPayload();
  if(jsonValue.ValueExists("DirectoryId"))
  {
    m_directoryId = jsonValue.GetString("DirectoryId");
  }

  return *this;
}
chiaming0914/awe-cpp-sdk
aws-cpp-sdk-ds/source/model/CreateMicrosoftADResult.cpp
C++
apache-2.0
1,426
package com.redhat.repository.validator.filter;

import org.eclipse.aether.artifact.Artifact;
import org.eclipse.aether.artifact.DefaultArtifact;
import org.junit.Test;

import com.redhat.repository.validator.filter.DependencyNotFoundExceptionFilter;
import com.redhat.repository.validator.impl.DependencyNotFoundException;

import java.io.File;

/**
 * Tests for {@code DependencyNotFoundExceptionFilter}: verifies which
 * {@code DependencyNotFoundException}s are ignored based on the missing-artifact
 * regex and, optionally, the validated-artifact regex.
 */
public class TestDependencyNotFoundExceptionFilter extends AbstractExceptionFilterTest {

    // Regex matched against the MISSING artifact (groupId:artifactId:extension:version).
    private static final String DEFAULT_MISSING_ARTIFACT_REGEX = "com.acme:finance.*:war:.*";

    // Artifact whose validation produced the exception (the "from" side).
    private static final Artifact DEFAULT_VALIDATED_ARTIFACT = new DefaultArtifact("com.acme", "acme-parent", "pom", "1.0.0");

    @Test
    public void shouldIgnoreExceptionWithMatchingArtifact() {
        DependencyNotFoundExceptionFilter filter = new DependencyNotFoundExceptionFilter(DEFAULT_MISSING_ARTIFACT_REGEX);

        Artifact artifact = new DefaultArtifact("com.acme", "finance-stuff", "war", "1.0-redhat-2");
        DependencyNotFoundException ex = new DependencyNotFoundException(new Exception(), artifact, DEFAULT_VALIDATED_ARTIFACT);

        assertExceptionIgnored(filter, ex, new File("some-file"));
    }

    // NOTE(review): the names of this test and the next one appear swapped relative
    // to their bodies (this one exercises a non-matching exception TYPE, the next a
    // non-matching ARTIFACT). Not renamed here to avoid churning the test surface — confirm.
    @Test
    public void shouldNotIgnoreExceptionOnlyWithMatchingArtifact() {
        DependencyNotFoundExceptionFilter filter = new DependencyNotFoundExceptionFilter(DEFAULT_MISSING_ARTIFACT_REGEX);

        // Exception != DependencyNotFoundException, so it should not be ignored
        Exception ex = new Exception("Some message");

        assertExceptionNotIgnored(filter, ex, new File("some-file"));
    }

    @Test
    public void shouldNotIgnoreExceptionOnlyWithMatchingType() {
        DependencyNotFoundExceptionFilter filter = new DependencyNotFoundExceptionFilter(DEFAULT_MISSING_ARTIFACT_REGEX);

        // "wrong-finance-stuff" artifactId does not match the defined regex
        Artifact artifact = new DefaultArtifact("com.acme", "wrong-finance-stuff", "war", "1.0-redhat-2");
        DependencyNotFoundException ex = new DependencyNotFoundException(new Exception(), artifact, DEFAULT_VALIDATED_ARTIFACT);

        assertExceptionNotIgnored(filter, ex, new File("some-file"));
    }

    @Test
    public void shouldIgnoreOnlyExceptionComingFromSpecifiedPom() {
        String validatedArtifactRegex = "com.acme:parent:pom:1.0.1";
        DependencyNotFoundExceptionFilter filter = new DependencyNotFoundExceptionFilter(DEFAULT_MISSING_ARTIFACT_REGEX, validatedArtifactRegex);

        Artifact missingArtifact = new DefaultArtifact("com.acme", "finance-stuff", "war", "1.0-redhat-2");
        Artifact validatedArtifact = new DefaultArtifact("com.acme", "parent", "pom", "1.0.1");
        DependencyNotFoundException ex = new DependencyNotFoundException(new Exception(), missingArtifact, validatedArtifact);
        assertExceptionIgnored(filter, ex, new File("some-file"));

        // now use the validated artifact that does not match the pattern used by the filter
        validatedArtifact = new DefaultArtifact("com.acme", "some-artifact", "pom", "2.0");
        ex = new DependencyNotFoundException(new Exception(), missingArtifact, validatedArtifact);
        assertExceptionNotIgnored(filter, ex, new File("some-file"));
    }

    @Test
    public void shouldHandleRegexWithClassifierAndArtifactWithoutClassifier() {
        String artifactRegexWithClassifier = "com.acme:finance.*:jar:.*:.*";
        DependencyNotFoundExceptionFilter filter = new DependencyNotFoundExceptionFilter(artifactRegexWithClassifier);

        // the regex specifies classifier .*, it should match the empty classifier of the artifact
        Artifact artifact = new DefaultArtifact("com.acme", "finance-stuff", "jar", "1.0-redhat-2");
        DependencyNotFoundException ex = new DependencyNotFoundException(new Exception(), artifact, DEFAULT_VALIDATED_ARTIFACT);
        assertExceptionIgnored(filter, ex, new File("some-file"));

        artifactRegexWithClassifier = "com.acme:finance.*:jar:classes:.*";
        filter = new DependencyNotFoundExceptionFilter(artifactRegexWithClassifier);

        // the regex specifies classifier `classes`, it should _not_ match the empty classifier of the artifact
        artifact = new DefaultArtifact("com.acme", "finance-stuff", "jar", "1.0-redhat-2");
        ex = new DependencyNotFoundException(new Exception(), artifact,
                DEFAULT_VALIDATED_ARTIFACT);
        assertExceptionNotIgnored(filter, ex, new File("some-file"));
    }

    @Test
    public void shouldHandleRegexWithoutClassifierAndArtifactWithClassifier() {
        String artifactRegexWithoutClassifier = "com.acme:finance.*:jar:.*";
        DependencyNotFoundExceptionFilter filter = new DependencyNotFoundExceptionFilter(artifactRegexWithoutClassifier);

        // the artifact has classifier, but the regex does not, so the classifier should be ignored in the comparison
        // and the exception then ignored
        Artifact artifact = new DefaultArtifact("com.acme", "finance-stuff", "classes", "jar", "1.0-redhat-2");
        DependencyNotFoundException ex = new DependencyNotFoundException(new Exception(), artifact, DEFAULT_VALIDATED_ARTIFACT);
        assertExceptionIgnored(filter, ex, new File("some-file"));
    }
}
thradec/wolf-validator
src/test/java/com/redhat/repository/validator/filter/TestDependencyNotFoundExceptionFilter.java
Java
apache-2.0
5,216
/*
 * Copyright 2010 JBoss Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.drools.runtime.pipeline.impl;

/**
 * Pipeline entry point: emits each inserted object together with a pipeline
 * context freshly created by the configured {@code PipelineContextFactory}.
 */
public class PipelineImpl extends BaseEmitter {
    // Factory used to create a new context for every insertion.
    private PipelineContextFactory factory;

    public PipelineImpl(PipelineContextFactory factory) {
        this.factory = factory;
    }

    /**
     * Pushes the given object into the pipeline, pairing it with a brand-new
     * pipeline context (one context per insert).
     */
    public void insert(Object object) {
        emit( object,
              this.factory.newPipelineContext() );
    }
}
mswiderski/droolsjbpm-integration
drools-pipeline/src/main/java/org/drools/runtime/pipeline/impl/PipelineImpl.java
Java
apache-2.0
943
// javascript (closure) port (c) 2013 Manuel Braun (mb@w69b.com) /* * Copyright 2008 ZXing authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ goog.require('goog.array'); goog.require('w69b.qr.GF256'); goog.require('w69b.qr.ReedSolomonDecoder'); goog.require('w69b.qr.ReedSolomonError'); define(['chai', 'corrupt'], function(chai, corrupt) { var assert = chai.assert; describe('ReedSolomonDecoder tests', function() { var ReedSolomonDecoder = w69b.qr.ReedSolomonDecoder; /** See ISO 18004, Appendix I, from which this example is taken. 
*/ var QR_CODE_TEST = [0x10, 0x20, 0x0C, 0x56, 0x61, 0x80, 0xEC, 0x11, 0xEC, 0x11, 0xEC, 0x11, 0xEC, 0x11, 0xEC, 0x11]; var QR_CODE_TEST_WITH_EC = [0x10, 0x20, 0x0C, 0x56, 0x61, 0x80, 0xEC, 0x11, 0xEC, 0x11, 0xEC, 0x11, 0xEC, 0x11, 0xEC, 0x11, 0xA5, 0x24, 0xD4, 0xC1, 0xED, 0x36, 0xC7, 0x87, 0x2C, 0x55]; var QR_CODE_ECC_BYTES = QR_CODE_TEST_WITH_EC.length - QR_CODE_TEST.length; var QR_CODE_CORRECTABLE = QR_CODE_ECC_BYTES / 2; var qrRSDecoder = new ReedSolomonDecoder(w69b.qr.GF256.QR_CODE_FIELD); function checkQRRSDecode(received) { qrRSDecoder.decode(received, QR_CODE_ECC_BYTES); // expect(received).to.equal(QR_CODE_TEST); for (var i = 0; i < QR_CODE_TEST.length; i++) { expect(received[i]).to.equal(QR_CODE_TEST[i]); } } it('decodes code with no errors', function() { var received = goog.array.clone(QR_CODE_TEST_WITH_EC); // no errors checkQRRSDecode(received); }); it('decodes code with one errors', function() { for (var i = 0; i < QR_CODE_TEST_WITH_EC.length; i++) { var received = goog.array.clone(QR_CODE_TEST_WITH_EC); received[i] = 0x00; // Math.round(Math.random() * 256); checkQRRSDecode(received); } }); it('decodes code with max errors', function() { for (var i = 0; i < 10; ++i) { // # iterations is kind of arbitrary var received = goog.array.clone(QR_CODE_TEST_WITH_EC); corrupt(received, QR_CODE_CORRECTABLE); checkQRRSDecode(received); } }); it('throws excpetion on failure', function() { var received = goog.array.clone(QR_CODE_TEST_WITH_EC); corrupt(received, QR_CODE_CORRECTABLE + 1); expect(function() { checkQRRSDecode(received); }).to.throw(w69b.qr.ReedSolomonError); }); }); });
dsanders11/barcode.js
tests/reedsolomondecoder.spec.js
JavaScript
apache-2.0
2,927
<?php namespace Dingo\Api\Routing; use Illuminate\Routing\Route; use Illuminate\Container\Container; class ControllerReviser { /** * Illuminate application container. * * @var \Illuminate\Container\Container */ protected $container; /** * Create a new controller reviser instance. * * @param \Illuminate\Container\Container $container */ public function __construct(Container $container = null) { $this->container = $container ?: new Container; } /** * Revise a controller route by updating the protection and scopes. * * @param \Illuminate\Routing\Route $route * @return \Illuminate\Routing\Route */ public function revise(Route $route) { if ($this->routingToController($route)) { list ($class, $method) = explode('@', $route->getActionName()); $controller = $this->resolveController($class); if ($controller instanceof Controller) { $action = $route->getAction(); $action = $this->reviseProtectedMethods($action, $controller, $method); $action = $this->reviseScopedMethods($action, $controller, $method); $route->setAction($action); } } return $route; } /** * Determine if the route is routing to a controller. * * @param \Illuminate\Routing\Route $route * @return bool */ protected function routingToController(Route $route) { return is_string(array_get($route->getAction(), 'controller')); } /** * Revise the scopes of a controller method. Scopes defined on the * controller are merged with those in the route definition. * * @param \Illuminate\Routing\Route $action * @param \Dingo\Api\Routing\Controller $controller * @param string $method * @return \Illuminate\Routing\Route */ protected function reviseScopedMethods($action, $controller, $method) { if (! 
isset($action['scopes'])) { $action['scopes'] = []; } $action['scopes'] = (array) $action['scopes']; $scopedMethods = $controller->getScopedMethods(); if (isset($scopedMethods['*'])) { $action['scopes'] = array_merge($action['scopes'], $scopedMethods['*']); } if (isset($scopedMethods[$method])) { $action['scopes'] = array_merge($action['scopes'], $scopedMethods[$method]); } return $action; } /** * Revise the protected state of a controller method. * * @param \Illuminate\Routing\Route $action * @param \Dingo\Api\Routing\Controller $controller * @param string $method * @return \Illuminate\Routing\Route */ protected function reviseProtectedMethods($action, $controller, $method) { if (in_array($method, $controller->getProtectedMethods())) { $action['protected'] = true; } elseif (in_array($method, $controller->getUnprotectedMethods())) { $action['protected'] = false; } return $action; } /** * Resolve a controller from the container. * * @param string $class * @return \Illuminate\Routing\Controller */ protected function resolveController($class) { $controller = $this->container->make($class); if (! $this->container->bound($class)) { $this->container->instance($class, $controller); } return $this->resolvedControllers[$class] = $controller; } }
prady00/Laravel-Swagger-REST
vendor/dingo/api/src/Routing/ControllerReviser.php
PHP
apache-2.0
3,615
package ch.sloth.dealhunter.ui; import android.content.Context; import android.support.design.widget.FloatingActionButton; import android.util.AttributeSet; import android.view.MotionEvent; import android.view.View; import android.widget.ScrollView; import java.util.ArrayList; /** * Created by martinschmidli on 29/04/16. */ public class DetailScrollView extends ScrollView { private FloatingActionButton mListener; public void addCallback(FloatingActionButton listener) { mListener = listener; } public DetailScrollView(Context context) { this(context, null, 0); } public DetailScrollView(Context context, AttributeSet attrs) { this(context, attrs, 0); } public DetailScrollView(Context context, AttributeSet attrs, int defStyle) { super(context, attrs, defStyle); } @Override public boolean onTouchEvent(MotionEvent ev) { if(mListener != null){ switch (ev.getAction()) { case MotionEvent.ACTION_DOWN: if (this.mListener.getVisibility() == View.VISIBLE) { this.mListener.hide(); } return super.onTouchEvent(ev); case MotionEvent.ACTION_UP: if (this.mListener.getVisibility() != View.VISIBLE) { this.mListener.show(); } return super.onTouchEvent(ev); default: return super.onTouchEvent(ev); } } return super.onTouchEvent(ev); } }
Team-Sloth/DealHunter
DealHunter/app/src/main/java/ch/sloth/dealhunter/ui/DetailScrollView.java
Java
apache-2.0
1,607
// Copyright 2007-2015 Chris Patterson, Dru Sellers, Travis Smith, et. al. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. namespace MassTransit.Testing.Indicators { /// <summary> /// Represents a resource which may be signaled. /// </summary> public interface ISignalResource { void Signal(); } }
jacobpovar/MassTransit
src/MassTransit/Testing/Indicators/ISignalResource.cs
C#
apache-2.0
845
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.highlight; import org.apache.lucene.search.vectorhighlight.FragListBuilder; import org.apache.solr.common.params.SolrParams; public class SimpleFragListBuilder extends HighlightingPluginBase implements SolrFragListBuilder { @Override public FragListBuilder getFragListBuilder(SolrParams params) { // NOTE: This class (currently) makes no use of params // If that ever changes, it should wrap them with defaults... // params = SolrParams.wrapDefaults(params, defaults) numRequests++; return new org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder(); } /////////////////////////////////////////////////////////////////////// //////////////////////// SolrInfoMBeans methods /////////////////////// /////////////////////////////////////////////////////////////////////// @Override public String getDescription() { return "SimpleFragListBuilder"; } @Override public String getSource() { return "$URL: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene_solr_4_7/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java $"; } }
pengzong1111/solr4
solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java
Java
apache-2.0
1,947
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.cql3.statements; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.ExecutionException; import com.google.common.collect.AbstractIterator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.auth.Permission; import org.apache.cassandra.cql3.*; import org.apache.cassandra.transport.messages.ResultMessage; import org.apache.cassandra.config.CFMetaData; import org.apache.cassandra.config.ColumnDefinition; import org.apache.cassandra.db.*; import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.*; import org.apache.cassandra.dht.*; import org.apache.cassandra.exceptions.*; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.service.QueryState; import org.apache.cassandra.service.RangeSliceVerbHandler; import org.apache.cassandra.service.StorageProxy; import org.apache.cassandra.service.StorageService; import org.apache.cassandra.db.ConsistencyLevel; import org.apache.cassandra.thrift.IndexExpression; import org.apache.cassandra.thrift.IndexOperator; import org.apache.cassandra.thrift.ThriftValidation; import org.apache.cassandra.utils.ByteBufferUtil; import 
org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.Pair; /** * Encapsulates a completely parsed SELECT query, including the target * column family, expression, result count, and ordering clause. * */ public class SelectStatement implements CQLStatement { private static final Logger logger = LoggerFactory.getLogger(SelectStatement.class); private final int boundTerms; public final CFDefinition cfDef; public final Parameters parameters; private final Selection selection; private final Restriction[] keyRestrictions; private final Restriction[] columnRestrictions; private final Map<CFDefinition.Name, Restriction> metadataRestrictions = new HashMap<CFDefinition.Name, Restriction>(); private Restriction sliceRestriction; private boolean isReversed; private boolean onToken; private boolean isKeyRange; private boolean keyIsInRelation; private static enum Bound { START(0), END(1); public final int idx; Bound(int idx) { this.idx = idx; } public static Bound reverse(Bound b) { return b == START ? 
END : START; } }; public SelectStatement(CFDefinition cfDef, int boundTerms, Parameters parameters, Selection selection) { this.cfDef = cfDef; this.boundTerms = boundTerms; this.selection = selection; this.keyRestrictions = new Restriction[cfDef.keys.size()]; this.columnRestrictions = new Restriction[cfDef.columns.size()]; this.parameters = parameters; } public int getBoundsTerms() { return boundTerms; } public void checkAccess(ClientState state) throws InvalidRequestException, UnauthorizedException { state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.SELECT); } public void validate(ClientState state) throws InvalidRequestException { // Nothing to do, all validation has been done by RawStatement.prepare() } public ResultMessage.Rows execute(ConsistencyLevel cl, QueryState state, List<ByteBuffer> variables) throws RequestExecutionException, RequestValidationException { if (cl == null) throw new InvalidRequestException("Invalid empty consistency level"); cl.validateForRead(keyspace()); List<Row> rows; if (isKeyRange) { RangeSliceCommand command = getRangeCommand(variables); rows = command == null ? Collections.<Row>emptyList() : StorageProxy.getRangeSlice(command, cl); } else { List<ReadCommand> commands = getSliceCommands(variables); rows = commands == null ? Collections.<Row>emptyList() : StorageProxy.read(commands, cl); } return processResults(rows, variables); } private ResultMessage.Rows processResults(List<Row> rows, List<ByteBuffer> variables) throws RequestValidationException { // Even for count, we need to process the result as it'll group some column together in sparse column families ResultSet rset = process(rows, variables); rset = parameters.isCount ? 
rset.makeCountResult() : rset; return new ResultMessage.Rows(rset); } static List<Row> readLocally(String keyspace, List<ReadCommand> cmds) { Table table = Table.open(keyspace); List<Row> rows = new ArrayList<Row>(cmds.size()); for (ReadCommand cmd : cmds) rows.add(cmd.getRow(table)); return rows; } public ResultMessage.Rows executeInternal(QueryState state) throws RequestExecutionException, RequestValidationException { try { List<ByteBuffer> variables = Collections.<ByteBuffer>emptyList(); List<Row> rows; if (isKeyRange) { RangeSliceCommand command = getRangeCommand(variables); rows = command == null ? Collections.<Row>emptyList() : RangeSliceVerbHandler.executeLocally(command); } else { List<ReadCommand> commands = getSliceCommands(variables); rows = commands == null ? Collections.<Row>emptyList() : readLocally(keyspace(), commands); } return processResults(rows, variables); } catch (ExecutionException e) { throw new RuntimeException(e); } catch (InterruptedException e) { throw new RuntimeException(e); } } public ResultSet process(List<Row> rows) throws InvalidRequestException { assert !parameters.isCount; // not yet needed return process(rows, Collections.<ByteBuffer>emptyList()); } public String keyspace() { return cfDef.cfm.ksName; } public String columnFamily() { return cfDef.cfm.cfName; } private List<ReadCommand> getSliceCommands(List<ByteBuffer> variables) throws RequestValidationException { QueryPath queryPath = new QueryPath(columnFamily()); Collection<ByteBuffer> keys = getKeys(variables); if (keys.isEmpty()) // in case of IN () for (the last column of) the partition key. return null; List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size()); // ...a range (slice) of column names if (isColumnRange()) { // Note that we use the total limit for every key. 
This is // potentially inefficient, but then again, IN + LIMIT is not a // very sensible choice for (ByteBuffer key : keys) { QueryProcessor.validateKey(key); // Note that we should not share the slice filter amongst the command, due to SliceQueryFilter not // being immutable due to its columnCounter used by the lastCounted() method // (this is fairly ugly and we should change that but that's probably not a tiny refactor to do that cleanly) SliceQueryFilter filter = (SliceQueryFilter)makeFilter(variables); if (filter == null) return null; commands.add(new SliceFromReadCommand(keyspace(), key, queryPath, filter)); } } // ...of a list of column names else { // ByNames commands can share the filter IDiskAtomFilter filter = makeFilter(variables); if (filter == null) return null; for (ByteBuffer key: keys) { QueryProcessor.validateKey(key); commands.add(new SliceByNamesReadCommand(keyspace(), key, queryPath, (NamesQueryFilter)filter)); } } return commands; } private RangeSliceCommand getRangeCommand(List<ByteBuffer> variables) throws RequestValidationException { IDiskAtomFilter filter = makeFilter(variables); if (filter == null) return null; List<IndexExpression> expressions = getIndexExpressions(variables); // The LIMIT provided by the user is the number of CQL row he wants returned. // We want to have getRangeSlice to count the number of columns, not the number of keys. AbstractBounds<RowPosition> keyBounds = getKeyBounds(variables); return keyBounds == null ? 
null : new RangeSliceCommand(keyspace(), columnFamily(), null, filter, keyBounds, expressions, getLimit(), true, false); } private AbstractBounds<RowPosition> getKeyBounds(List<ByteBuffer> variables) throws InvalidRequestException { IPartitioner<?> p = StorageService.getPartitioner(); if (onToken) { Token startToken = getTokenBound(Bound.START, variables, p); Token endToken = getTokenBound(Bound.END, variables, p); boolean includeStart = includeKeyBound(Bound.START); boolean includeEnd = includeKeyBound(Bound.END); /* * If we ask SP.getRangeSlice() for (token(200), token(200)], it will happily return the whole ring. * However, wrapping range doesn't really make sense for CQL, and we want to return an empty result * in that case (CASSANDRA-5573). So special case to create a range that is guaranteed to be empty. * * In practice, we want to return an empty result set if either startToken > endToken, or both are * equal but one of the bound is excluded (since [a, a] can contains something, but not (a, a], [a, a) * or (a, a)). Note though that in the case where startToken or endToken is the minimum token, then * this special case rule should not apply. */ int cmp = startToken.compareTo(endToken); if (!startToken.isMinimum() && !endToken.isMinimum() && (cmp > 0 || (cmp == 0 && (!includeStart || !includeEnd)))) return null; RowPosition start = includeStart ? startToken.minKeyBound() : startToken.maxKeyBound(); RowPosition end = includeEnd ? endToken.maxKeyBound() : endToken.minKeyBound(); return new Range<RowPosition>(start, end); } else { ByteBuffer startKeyBytes = getKeyBound(Bound.START, variables); ByteBuffer finishKeyBytes = getKeyBound(Bound.END, variables); RowPosition startKey = RowPosition.forKey(startKeyBytes, p); RowPosition finishKey = RowPosition.forKey(finishKeyBytes, p); if (startKey.compareTo(finishKey) > 0 && !finishKey.isMinimum(p)) return null; if (includeKeyBound(Bound.START)) { return includeKeyBound(Bound.END) ? 
new Bounds<RowPosition>(startKey, finishKey) : new IncludingExcludingBounds<RowPosition>(startKey, finishKey); } else { return includeKeyBound(Bound.END) ? new Range<RowPosition>(startKey, finishKey) : new ExcludingBounds<RowPosition>(startKey, finishKey); } } } private IDiskAtomFilter makeFilter(List<ByteBuffer> variables) throws InvalidRequestException { if (isColumnRange()) { // For sparse, we used to ask for 'defined columns' * 'asked limit' (where defined columns includes the row marker) // to account for the grouping of columns. // Since that doesn't work for maps/sets/lists, we now use the compositesToGroup option of SliceQueryFilter. // But we must preserve backward compatibility too (for mixed version cluster that is). int multiplier = cfDef.isCompact ? 1 : (cfDef.metadata.size() + 1); int toGroup = cfDef.isCompact ? -1 : cfDef.columns.size(); List<ByteBuffer> startBounds = getRequestedBound(Bound.START, variables); List<ByteBuffer> endBounds = getRequestedBound(Bound.END, variables); assert startBounds.size() == endBounds.size(); // The case where startBounds == 1 is common enough that it's worth optimizing ColumnSlice[] slices; if (startBounds.size() == 1) { ColumnSlice slice = new ColumnSlice(startBounds.get(0), endBounds.get(0)); if (slice.isAlwaysEmpty(cfDef.cfm.comparator, isReversed)) return null; slices = new ColumnSlice[]{slice}; } else { List<ColumnSlice> l = new ArrayList<ColumnSlice>(startBounds.size()); for (int i = 0; i < startBounds.size(); i++) { ColumnSlice slice = new ColumnSlice(startBounds.get(i), endBounds.get(i)); if (!slice.isAlwaysEmpty(cfDef.cfm.comparator, isReversed)) l.add(slice); } if (l.isEmpty()) return null; slices = l.toArray(new ColumnSlice[l.size()]); } return new SliceQueryFilter(slices, isReversed, getLimit(), toGroup, multiplier); } else { SortedSet<ByteBuffer> columnNames = getRequestedColumns(variables); if (columnNames == null) // in case of IN () for the last column of the key return null; 
QueryProcessor.validateColumnNames(columnNames); return new NamesQueryFilter(columnNames, true); } } private int getLimit() { // Internally, we don't support exclusive bounds for slices. Instead, // we query one more element if necessary and exclude return sliceRestriction != null && !sliceRestriction.isInclusive(Bound.START) && parameters.limit != Integer.MAX_VALUE ? parameters.limit + 1 : parameters.limit; } private Collection<ByteBuffer> getKeys(final List<ByteBuffer> variables) throws InvalidRequestException { List<ByteBuffer> keys = new ArrayList<ByteBuffer>(); ColumnNameBuilder builder = cfDef.getKeyNameBuilder(); for (CFDefinition.Name name : cfDef.keys.values()) { Restriction r = keyRestrictions[name.position]; assert r != null; if (builder.remainingCount() == 1) { for (Term t : r.eqValues) { ByteBuffer val = t.bindAndGet(variables); if (val == null) throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name)); keys.add(builder.copy().add(val).build()); } } else { if (r.isINRestriction()) throw new InvalidRequestException("IN is only supported on the last column of the partition key"); ByteBuffer val = r.eqValues.get(0).bindAndGet(variables); if (val == null) throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", name)); builder.add(val); } } return keys; } private ByteBuffer getKeyBound(Bound b, List<ByteBuffer> variables) throws InvalidRequestException { // We deal with IN queries for keys in other places, so we know buildBound will return only one result return buildBound(b, cfDef.keys.values(), keyRestrictions, false, cfDef.getKeyNameBuilder(), variables).get(0); } private Token getTokenBound(Bound b, List<ByteBuffer> variables, IPartitioner<?> p) throws InvalidRequestException { assert onToken; Restriction keyRestriction = keyRestrictions[0]; Term t = keyRestriction.isEquality() ? 
keyRestriction.eqValues.get(0) : keyRestriction.bound(b); if (t == null) return p.getMinimumToken(); ByteBuffer value = t.bindAndGet(variables); if (value == null) throw new InvalidRequestException("Invalid null token value"); return p.getTokenFactory().fromByteArray(value); } private boolean includeKeyBound(Bound b) { for (Restriction r : keyRestrictions) { if (r == null) return true; else if (!r.isEquality()) return r.isInclusive(b); } // All equality return true; } private boolean isColumnRange() { // Due to CASSANDRA-5762, we always do a slice for CQL3 tables (not compact, composite). // Static CF (non compact but non composite) never entails a column slice however if (!cfDef.isCompact) return cfDef.isComposite; // Otherwise (i.e. for compact table where we don't have a row marker anyway and thus don't care about CASSANDRA-5762), // it is a range query if it has at least one the column alias for which no relation is defined or is not EQ. for (Restriction r : columnRestrictions) { if (r == null || !r.isEquality()) return true; } return false; } private SortedSet<ByteBuffer> getRequestedColumns(List<ByteBuffer> variables) throws InvalidRequestException { assert !isColumnRange(); ColumnNameBuilder builder = cfDef.getColumnNameBuilder(); Iterator<ColumnIdentifier> idIter = cfDef.columns.keySet().iterator(); for (Restriction r : columnRestrictions) { ColumnIdentifier id = idIter.next(); assert r != null && r.isEquality(); if (r.isINRestriction()) { // We have a IN, which we only support for the last column. // If compact, just add all values and we're done. Otherwise, // for each value of the IN, creates all the columns corresponding to the selection. if (r.eqValues.isEmpty()) return null; SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(cfDef.cfm.comparator); Iterator<Term> iter = r.eqValues.iterator(); while (iter.hasNext()) { Term v = iter.next(); ColumnNameBuilder b = iter.hasNext() ? 
builder.copy() : builder; ByteBuffer val = v.bindAndGet(variables); if (val == null) throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", id)); b.add(val); if (cfDef.isCompact) columns.add(b.build()); else columns.addAll(addSelectedColumns(b)); } return columns; } else { ByteBuffer val = r.eqValues.get(0).bindAndGet(variables); if (val == null) throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", id)); builder.add(val); } } return addSelectedColumns(builder); } private SortedSet<ByteBuffer> addSelectedColumns(ColumnNameBuilder builder) { if (cfDef.isCompact) { return FBUtilities.singleton(builder.build()); } else { // Collections require doing a slice query because a given collection is a // non-know set of columns, so we shouldn't get there assert !selectACollection(); SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(cfDef.cfm.comparator); // We need to query the selected column as well as the marker // column (for the case where the row exists but has no columns outside the PK) // One exception is "static CF" (non-composite non-compact CF) that // don't have marker and for which we must query all columns instead if (cfDef.isComposite) { // marker columns.add(builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build()); // selected columns for (ColumnIdentifier id : selection.regularColumnsToFetch()) columns.add(builder.copy().add(id.key).build()); } else { Iterator<ColumnIdentifier> iter = cfDef.metadata.keySet().iterator(); while (iter.hasNext()) { ColumnIdentifier name = iter.next(); ColumnNameBuilder b = iter.hasNext() ? 
builder.copy() : builder; ByteBuffer cname = b.add(name.key).build(); columns.add(cname); } } return columns; } } private boolean selectACollection() { if (!cfDef.hasCollections) return false; for (CFDefinition.Name name : selection.getColumnsList()) { if (name.type instanceof CollectionType) return true; } return false; } private List<ByteBuffer> buildBound(Bound bound, Collection<CFDefinition.Name> names, Restriction[] restrictions, boolean isReversed, ColumnNameBuilder builder, List<ByteBuffer> variables) throws InvalidRequestException { // The end-of-component of composite doesn't depend on whether the // component type is reversed or not (i.e. the ReversedType is applied // to the component comparator but not to the end-of-component itself), // it only depends on whether the slice is reversed Bound eocBound = isReversed ? Bound.reverse(bound) : bound; for (CFDefinition.Name name : names) { // In a restriction, we always have Bound.START < Bound.END for the "base" comparator. // So if we're doing a reverse slice, we must inverse the bounds when giving them as start and end of the slice filter. // But if the actual comparator itself is reversed, we must inversed the bounds too. Bound b = isReversed == isReversedType(name) ? bound : Bound.reverse(bound); Restriction r = restrictions[name.position]; if (r == null || (!r.isEquality() && r.bound(b) == null)) { // There wasn't any non EQ relation on that key, we select all records having the preceding component as prefix. // For composites, if there was preceding component and we're computing the end, we must change the last component // End-Of-Component, otherwise we would be selecting only one record. return Collections.singletonList(builder.componentCount() > 0 && eocBound == Bound.END ? 
builder.buildAsEndOfRange() : builder.build()); } if (r.isEquality()) { if (r.isINRestriction()) { // IN query, we only support it on the clustering column assert name.position == names.size() - 1; // The IN query might not have listed the values in comparator order, so we need to re-sort // the bounds lists to make sure the slices works correctly (also, to avoid duplicates). TreeSet<ByteBuffer> s = new TreeSet<ByteBuffer>(isReversed ? cfDef.cfm.comparator.reverseComparator : cfDef.cfm.comparator); for (Term t : r.eqValues) { ByteBuffer val = t.bindAndGet(variables); if (val == null) throw new InvalidRequestException(String.format("Invalid null clustering key part %s", name)); ColumnNameBuilder copy = builder.copy().add(val); // See below for why this s.add((bound == Bound.END && copy.remainingCount() > 0) ? copy.buildAsEndOfRange() : copy.build()); } return new ArrayList<ByteBuffer>(s); } ByteBuffer val = r.eqValues.get(0).bindAndGet(variables); if (val == null) throw new InvalidRequestException(String.format("Invalid null clustering key part %s", name)); builder.add(val); } else { Term t = r.bound(b); assert t != null; ByteBuffer val = t.bindAndGet(variables); if (val == null) throw new InvalidRequestException(String.format("Invalid null clustering key part %s", name)); return Collections.singletonList(builder.add(val, r.getRelation(eocBound, b)).build()); } } // Means no relation at all or everything was an equal // Note: if the builder is "full", there is no need to use the end-of-component bit. For columns selection, // it would be harmless to do it. However, we use this method got the partition key too. And when a query // with 2ndary index is done, and with the the partition provided with an EQ, we'll end up here, and in that // case using the eoc would be bad, since for the random partitioner we have no guarantee that // builder.buildAsEndOfRange() will sort after builder.build() (see #5240). 
return Collections.singletonList((bound == Bound.END && builder.remainingCount() > 0) ? builder.buildAsEndOfRange() : builder.build()); } private List<ByteBuffer> getRequestedBound(Bound b, List<ByteBuffer> variables) throws InvalidRequestException { assert isColumnRange(); return buildBound(b, cfDef.columns.values(), columnRestrictions, isReversed, cfDef.getColumnNameBuilder(), variables); } private List<IndexExpression> getIndexExpressions(List<ByteBuffer> variables) throws InvalidRequestException { if (metadataRestrictions.isEmpty()) return Collections.<IndexExpression>emptyList(); List<IndexExpression> expressions = new ArrayList<IndexExpression>(); for (Map.Entry<CFDefinition.Name, Restriction> entry : metadataRestrictions.entrySet()) { CFDefinition.Name name = entry.getKey(); Restriction restriction = entry.getValue(); if (restriction.isEquality()) { assert restriction.eqValues.size() == 1; // IN is not supported for indexed columns. ByteBuffer value = restriction.eqValues.get(0).bindAndGet(variables); if (value == null) throw new InvalidRequestException(String.format("Unsupported null value for indexed column %s", name)); if (value.remaining() > 0xFFFF) throw new InvalidRequestException("Index expression values may not be larger than 64K"); expressions.add(new IndexExpression(name.name.key, IndexOperator.EQ, value)); } else { for (Bound b : Bound.values()) { if (restriction.bound(b) != null) { ByteBuffer value = restriction.bound(b).bindAndGet(variables); if (value == null) throw new InvalidRequestException(String.format("Unsupported null value for indexed column %s", name)); if (value.remaining() > 0xFFFF) throw new InvalidRequestException("Index expression values may not be larger than 64K"); expressions.add(new IndexExpression(name.name.key, restriction.getIndexOperator(b), value)); } } } } return expressions; } private Iterable<IColumn> columnsInOrder(final ColumnFamily cf, final List<ByteBuffer> variables) throws InvalidRequestException { // If the 
restriction for the last column alias is an IN, respect // requested order Restriction last = columnRestrictions[columnRestrictions.length - 1]; if (last == null || !last.isEquality()) return cf.getSortedColumns(); ColumnNameBuilder builder = cfDef.getColumnNameBuilder(); for (int i = 0; i < columnRestrictions.length - 1; i++) builder.add(columnRestrictions[i].eqValues.get(0).bindAndGet(variables)); final List<ByteBuffer> requested = new ArrayList<ByteBuffer>(last.eqValues.size()); Iterator<Term> iter = last.eqValues.iterator(); while (iter.hasNext()) { Term t = iter.next(); ColumnNameBuilder b = iter.hasNext() ? builder.copy() : builder; requested.add(b.add(t.bindAndGet(variables)).build()); } return new Iterable<IColumn>() { public Iterator<IColumn> iterator() { return new AbstractIterator<IColumn>() { Iterator<ByteBuffer> iter = requested.iterator(); public IColumn computeNext() { if (!iter.hasNext()) return endOfData(); IColumn column = cf.getColumn(iter.next()); return column == null ? 
computeNext() : column; } }; } }; } private ResultSet process(List<Row> rows, List<ByteBuffer> variables) throws InvalidRequestException { Selection.ResultSetBuilder result = selection.resultSetBuilder(); for (org.apache.cassandra.db.Row row : rows) { // Not columns match the query, skip if (row.cf == null) continue; ByteBuffer[] keyComponents = null; if (cfDef.hasCompositeKey) { keyComponents = ((CompositeType)cfDef.cfm.getKeyValidator()).split(row.key.key); } else { keyComponents = new ByteBuffer[]{ row.key.key }; } if (cfDef.isCompact) { // One cqlRow per column for (IColumn c : columnsInOrder(row.cf, variables)) { if (c.isMarkedForDelete()) continue; ByteBuffer[] components = null; if (cfDef.isComposite) { components = ((CompositeType)cfDef.cfm.comparator).split(c.name()); } else if (sliceRestriction != null) { // For dynamic CF, the column could be out of the requested bounds, filter here if (!sliceRestriction.isInclusive(Bound.START) && c.name().equals(sliceRestriction.bound(Bound.START).bindAndGet(variables))) continue; if (!sliceRestriction.isInclusive(Bound.END) && c.name().equals(sliceRestriction.bound(Bound.END).bindAndGet(variables))) continue; } result.newRow(); // Respect selection order for (CFDefinition.Name name : selection.getColumnsList()) { switch (name.kind) { case KEY_ALIAS: result.add(keyComponents[name.position]); break; case COLUMN_ALIAS: ByteBuffer val = cfDef.isComposite ? (name.position < components.length ? 
components[name.position] : null) : c.name(); result.add(val); break; case VALUE_ALIAS: result.add(c); break; case COLUMN_METADATA: // This should not happen for compact CF throw new AssertionError(); default: throw new AssertionError(); } } } } else if (cfDef.isComposite) { // Sparse case: group column in cqlRow when composite prefix is equal CompositeType composite = (CompositeType)cfDef.cfm.comparator; ColumnGroupMap.Builder builder = new ColumnGroupMap.Builder(composite, cfDef.hasCollections); for (IColumn c : row.cf) { if (c.isMarkedForDelete()) continue; builder.add(c); } for (ColumnGroupMap group : builder.groups()) handleGroup(selection, result, row.key.key, keyComponents, group); } else { if (row.cf.hasOnlyTombstones()) continue; // Static case: One cqlRow for all columns result.newRow(); for (CFDefinition.Name name : selection.getColumnsList()) { if (name.kind == CFDefinition.Name.Kind.KEY_ALIAS) result.add(keyComponents[name.position]); else result.add(row.cf.getColumn(name.name.key)); } } } ResultSet cqlRows = result.build(); orderResults(cqlRows); // Internal calls always return columns in the comparator order, even when reverse was set if (isReversed) cqlRows.reverse(); // Trim result if needed to respect the limit cqlRows.trim(parameters.limit); return cqlRows; } /** * Orders results when multiple keys are selected (using IN) */ private void orderResults(ResultSet cqlRows) { // There is nothing to do if // a. there are no results, // b. no ordering information where given, // c. 
key restriction is a Range or not an IN expression if (cqlRows.size() == 0 || parameters.orderings.isEmpty() || isKeyRange || !keyIsInRelation) return; // optimization when only *one* order condition was given // because there is no point of using composite comparator if there is only one order condition if (parameters.orderings.size() == 1) { CFDefinition.Name ordering = cfDef.get(parameters.orderings.keySet().iterator().next()); Collections.sort(cqlRows.rows, new SingleColumnComparator(getColumnPositionInResultSet(cqlRows, ordering), ordering.type)); return; } // builds a 'composite' type for multi-column comparison from the comparators of the ordering components // and passes collected position information and built composite comparator to CompositeComparator to do // an actual comparison of the CQL rows. List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(parameters.orderings.size()); int[] positions = new int[parameters.orderings.size()]; int idx = 0; for (ColumnIdentifier identifier : parameters.orderings.keySet()) { CFDefinition.Name orderingColumn = cfDef.get(identifier); types.add(orderingColumn.type); positions[idx++] = getColumnPositionInResultSet(cqlRows, orderingColumn); } Collections.sort(cqlRows.rows, new CompositeComparator(types, positions)); } // determine position of column in the select clause private int getColumnPositionInResultSet(ResultSet rs, CFDefinition.Name columnName) { for (int i = 0; i < rs.metadata.names.size(); i++) { if (rs.metadata.names.get(i).name.equals(columnName.name)) return i; } throw new IllegalArgumentException(String.format("Column %s wasn't found in select clause.", columnName)); } /** * For sparse composite, returns wheter two columns belong to the same * cqlRow base on the full list of component in the name. * Two columns do belong together if they differ only by the last * component. 
*/ private static boolean isSameRow(ByteBuffer[] c1, ByteBuffer[] c2) { // Cql don't allow to insert columns who doesn't have all component of // the composite set for sparse composite. Someone coming from thrift // could hit that though. But since we have no way to handle this // correctly, better fail here and tell whomever may hit that (if // someone ever do) to change the definition to a dense composite assert c1.length == c2.length : "Sparse composite should not have partial column names"; for (int i = 0; i < c1.length - 1; i++) { if (!c1[i].equals(c2[i])) return false; } return true; } private void handleGroup(Selection selection, Selection.ResultSetBuilder result, ByteBuffer key, ByteBuffer[] keyComponents, ColumnGroupMap columns) throws InvalidRequestException { // Respect requested order result.newRow(); for (CFDefinition.Name name : selection.getColumnsList()) { switch (name.kind) { case KEY_ALIAS: result.add(keyComponents[name.position]); break; case COLUMN_ALIAS: result.add(columns.getKeyComponent(name.position)); break; case VALUE_ALIAS: // This should not happen for SPARSE throw new AssertionError(); case COLUMN_METADATA: if (name.type.isCollection()) { List<Pair<ByteBuffer, IColumn>> collection = columns.getCollection(name.name.key); ByteBuffer value = collection == null ? 
null : ((CollectionType)name.type).serialize(collection); result.add(value); } else { result.add(columns.getSimple(name.name.key)); } break; } } } private static boolean isReversedType(CFDefinition.Name name) { return name.type instanceof ReversedType; } private boolean columnFilterIsIdentity() { for (Restriction r : columnRestrictions) { if (r != null) return false; } return true; } public static class RawStatement extends CFStatement { private final Parameters parameters; private final List<RawSelector> selectClause; private final List<Relation> whereClause; public RawStatement(CFName cfName, Parameters parameters, List<RawSelector> selectClause, List<Relation> whereClause) { super(cfName); this.parameters = parameters; this.selectClause = selectClause; this.whereClause = whereClause == null ? Collections.<Relation>emptyList() : whereClause; } public ParsedStatement.Prepared prepare() throws InvalidRequestException { CFMetaData cfm = ThriftValidation.validateColumnFamily(keyspace(), columnFamily()); if (parameters.limit <= 0) throw new InvalidRequestException("LIMIT must be strictly positive"); CFDefinition cfDef = cfm.getCfDef(); ColumnSpecification[] names = new ColumnSpecification[getBoundsTerms()]; // Select clause if (parameters.isCount && !selectClause.isEmpty()) throw new InvalidRequestException("Only COUNT(*) and COUNT(1) operations are currently supported."); Selection selection = selectClause.isEmpty() ? Selection.wildcard(cfDef) : Selection.fromSelectors(cfDef, selectClause); SelectStatement stmt = new SelectStatement(cfDef, getBoundsTerms(), parameters, selection); /* * WHERE clause. For a given entity, rules are: * - EQ relation conflicts with anything else (including a 2nd EQ) * - Can't have more than one LT(E) relation (resp. 
GT(E) relation) * - IN relation are restricted to row keys (for now) and conflicts with anything else * (we could allow two IN for the same entity but that doesn't seem very useful) * - The value_alias cannot be restricted in any way (we don't support wide rows with indexed value in CQL so far) */ for (Relation rel : whereClause) { CFDefinition.Name name = cfDef.get(rel.getEntity()); if (name == null) throw new InvalidRequestException(String.format("Undefined name %s in where clause ('%s')", rel.getEntity(), rel)); switch (name.kind) { case KEY_ALIAS: stmt.keyRestrictions[name.position] = updateRestriction(name, stmt.keyRestrictions[name.position], rel, names); break; case COLUMN_ALIAS: stmt.columnRestrictions[name.position] = updateRestriction(name, stmt.columnRestrictions[name.position], rel, names); break; case VALUE_ALIAS: throw new InvalidRequestException(String.format("Predicates on the non-primary-key column (%s) of a COMPACT table are not yet supported", name.name)); case COLUMN_METADATA: stmt.metadataRestrictions.put(name, updateRestriction(name, stmt.metadataRestrictions.get(name), rel, names)); break; } } /* * At this point, the select statement if fully constructed, but we still have a few things to validate */ // If a component of the PRIMARY KEY is restricted by a non-EQ relation, all preceding // components must have a EQ, and all following must have no restriction boolean shouldBeDone = false; CFDefinition.Name previous = null; Iterator<CFDefinition.Name> iter = cfDef.columns.values().iterator(); for (int i = 0; i < stmt.columnRestrictions.length; i++) { CFDefinition.Name cname = iter.next(); Restriction restriction = stmt.columnRestrictions[i]; if (restriction == null) { shouldBeDone = true; } else if (shouldBeDone) { throw new InvalidRequestException(String.format("PRIMARY KEY part %s cannot be restricted (preceding part %s is either not restricted or by a non-EQ relation)", cname, previous)); } else if (!restriction.isEquality()) { shouldBeDone = 
true; // For non-composite slices, we don't support internally the difference between exclusive and // inclusive bounds, so we deal with it manually. if (!cfDef.isComposite && (!restriction.isInclusive(Bound.START) || !restriction.isInclusive(Bound.END))) stmt.sliceRestriction = restriction; } // We only support IN for the last name so far // TODO: #3885 allows us to extend to other parts (cf. #4762) else if (restriction.isINRestriction()) { if (i != stmt.columnRestrictions.length - 1) throw new InvalidRequestException(String.format("PRIMARY KEY part %s cannot be restricted by IN relation", cname)); else if (stmt.selectACollection()) throw new InvalidRequestException(String.format("Cannot restrict PRIMARY KEY part %s by IN relation as a collection is selected by the query", cname)); } previous = cname; } // If a component of the partition key is restricted by a non-EQ relation, all preceding // components must have a EQ, and all following must have no restriction shouldBeDone = false; previous = null; stmt.keyIsInRelation = false; iter = cfDef.keys.values().iterator(); for (int i = 0; i < stmt.keyRestrictions.length; i++) { CFDefinition.Name cname = iter.next(); Restriction restriction = stmt.keyRestrictions[i]; if (restriction == null) { if (stmt.onToken) throw new InvalidRequestException("The token() function must be applied to all partition key components or none of them"); // The only time not restricting a key part is allowed is if none are restricted if (i > 0 && stmt.keyRestrictions[i-1] != null) throw new InvalidRequestException(String.format("Partition key part %s must be restricted since preceding part is", cname)); stmt.isKeyRange = true; shouldBeDone = true; } else if (shouldBeDone) { throw new InvalidRequestException(String.format("partition key part %s cannot be restricted (preceding part %s is either not restricted or by a non-EQ relation)", cname, previous)); } else if (restriction.onToken) { // If this is a query on tokens, it's necessarily a range 
query (there can be more than one key per token). stmt.isKeyRange = true; stmt.onToken = true; } else if (stmt.onToken) { throw new InvalidRequestException(String.format("The token() function must be applied to all partition key components or none of them")); } else if (restriction.isEquality()) { if (restriction.isINRestriction()) { // We only support IN for the last name so far if (i != stmt.keyRestrictions.length - 1) throw new InvalidRequestException(String.format("Partition KEY part %s cannot be restricted by IN relation (only the last part of the partition key can)", cname)); stmt.keyIsInRelation = true; } } else { throw new InvalidRequestException("Only EQ and IN relation are supported on the partition key (you will need to use the token() function for non equality based relation)"); } previous = cname; } // Deal with indexed columns if (!stmt.metadataRestrictions.isEmpty()) { stmt.isKeyRange = true; Set<ByteBuffer> indexedNames = new HashSet<ByteBuffer>(); for (ColumnDefinition cfdef : cfm.getColumn_metadata().values()) { if (cfdef.getIndexType() != null) { indexedNames.add(cfdef.name); } } // Note: we cannot use idxManager.indexes() methods because we don't have a complete column name at this point, we only // have the indexed component. 
boolean hasEq = false; for (Map.Entry<CFDefinition.Name, Restriction> entry : stmt.metadataRestrictions.entrySet()) { Restriction restriction = entry.getValue(); if (!restriction.isEquality()) continue; // We don't support IN for indexed values (basically this would require supporting a form of OR) if (restriction.isINRestriction()) throw new InvalidRequestException("Cannot use IN operator on column not part of the partition key"); if (indexedNames.contains(entry.getKey().name.key)) hasEq = true; } if (!hasEq) throw new InvalidRequestException("No indexed columns present in by-columns clause with Equal operator"); // If we have indexed columns and the key = X clause, we will do a range query, but if it's a IN relation, we don't know how to handle it. if (stmt.keyIsInRelation) throw new InvalidRequestException("Select on indexed columns and with IN clause for the PRIMARY KEY are not supported"); } if (!stmt.parameters.orderings.isEmpty()) { if (!stmt.metadataRestrictions.isEmpty()) throw new InvalidRequestException("ORDER BY with 2ndary indexes is not supported."); if (stmt.isKeyRange) throw new InvalidRequestException("ORDER BY is only supported when the partition key is restricted by an EQ or an IN."); // If we order an IN query, we'll have to do a manual sort post-query. 
Currently, this sorting requires that we // have queried the column on which we sort (TODO: we should update it to add the column on which we sort to the one // queried automatically, and then removing it from the resultSet afterwards if needed) if (stmt.keyIsInRelation && !selectClause.isEmpty()) // empty means wildcard was used { for (ColumnIdentifier column : stmt.parameters.orderings.keySet()) { CFDefinition.Name name = cfDef.get(column); boolean hasColumn = false; for (RawSelector selector : selectClause) { if (name.name.equals(selector)) { hasColumn = true; break; } } if (!hasColumn) throw new InvalidRequestException("ORDER BY could not be used on columns missing in select clause."); } } Boolean[] reversedMap = new Boolean[cfDef.columns.size()]; int i = 0; for (Map.Entry<ColumnIdentifier, Boolean> entry : stmt.parameters.orderings.entrySet()) { ColumnIdentifier column = entry.getKey(); boolean reversed = entry.getValue(); CFDefinition.Name name = cfDef.get(column); if (name == null) throw new InvalidRequestException(String.format("Order by on unknown column %s", column)); if (name.kind != CFDefinition.Name.Kind.COLUMN_ALIAS) throw new InvalidRequestException(String.format("Order by is currently only supported on the clustered columns of the PRIMARY KEY, got %s", column)); if (i++ != name.position) throw new InvalidRequestException(String.format("Order by currently only support the ordering of columns following their declared order in the PRIMARY KEY")); reversedMap[name.position] = (reversed != isReversedType(name)); } // Check that all boolean in reversedMap, if set, agrees Boolean isReversed = null; for (Boolean b : reversedMap) { // Column on which order is specified can be in any order if (b == null) continue; if (isReversed == null) { isReversed = b; continue; } if (isReversed != b) throw new InvalidRequestException(String.format("Unsupported order by relation")); } assert isReversed != null; stmt.isReversed = isReversed; } // Make sure this queries is 
allowed (note: only key range can involve filtering underneath) if (!parameters.allowFiltering && stmt.isKeyRange) { // We will potentially filter data if either: // - Have more than one IndexExpression // - Have no index expression and the column filter is not the identity if (stmt.metadataRestrictions.size() > 1 || (stmt.metadataRestrictions.isEmpty() && !stmt.columnFilterIsIdentity())) throw new InvalidRequestException("Cannot execute this query as it might involve data filtering and thus may have unpredictable performance. " + "If you want to execute this query despite the performance unpredictability, use ALLOW FILTERING"); } return new ParsedStatement.Prepared(stmt, Arrays.<ColumnSpecification>asList(names)); } Restriction updateRestriction(CFDefinition.Name name, Restriction restriction, Relation newRel, ColumnSpecification[] boundNames) throws InvalidRequestException { ColumnSpecification receiver = name; if (newRel.onToken) { if (name.kind != CFDefinition.Name.Kind.KEY_ALIAS) throw new InvalidRequestException(String.format("The token() function is only supported on the partition key, found on %s", name)); receiver = new ColumnSpecification(name.ksName, name.cfName, new ColumnIdentifier("partition key token", true), StorageService.getPartitioner().getTokenValidator()); } switch (newRel.operator()) { case EQ: { if (restriction != null) throw new InvalidRequestException(String.format("%s cannot be restricted by more than one relation if it includes an Equal", name)); Term t = newRel.getValue().prepare(receiver); t.collectMarkerSpecification(boundNames); restriction = new Restriction(t, newRel.onToken); } break; case IN: if (restriction != null) throw new InvalidRequestException(String.format("%s cannot be restricted by more than one relation if it includes a IN", name)); List<Term> inValues = new ArrayList<Term>(newRel.getInValues().size()); for (Term.Raw raw : newRel.getInValues()) { Term t = raw.prepare(receiver); t.collectMarkerSpecification(boundNames); 
inValues.add(t); } restriction = new Restriction(inValues); break; case GT: case GTE: case LT: case LTE: { if (restriction == null) restriction = new Restriction(newRel.onToken); Term t = newRel.getValue().prepare(receiver); t.collectMarkerSpecification(boundNames); restriction.setBound(name.name, newRel.operator(), t); } break; } return restriction; } @Override public String toString() { return String.format("SelectRawStatement[name=%s, selectClause=%s, whereClause=%s, isCount=%s, limit=%s]", cfName, selectClause, whereClause, parameters.isCount, parameters.limit); } } // A rather raw class that simplify validation and query for select // Don't made public as this can be easily badly used private static class Restriction { // for equality List<Term> eqValues; // if null, it's a restriction by bounds // for bounds private final Term[] bounds; private final boolean[] boundInclusive; final boolean onToken; Restriction(List<Term> values, boolean onToken) { this.eqValues = values; this.bounds = null; this.boundInclusive = null; this.onToken = onToken; } Restriction(List<Term> values) { this(values, false); } Restriction(Term value, boolean onToken) { this(Collections.singletonList(value), onToken); } Restriction(boolean onToken) { this.eqValues = null; this.bounds = new Term[2]; this.boundInclusive = new boolean[2]; this.onToken = onToken; } boolean isEquality() { return eqValues != null; } boolean isINRestriction() { return isEquality() && (eqValues.isEmpty() || eqValues.size() > 1); } public Term bound(Bound b) { return bounds[b.idx]; } public boolean isInclusive(Bound b) { return bounds[b.idx] == null || boundInclusive[b.idx]; } public Relation.Type getRelation(Bound eocBound, Bound inclusiveBound) { switch (eocBound) { case START: return boundInclusive[inclusiveBound.idx] ? Relation.Type.GTE : Relation.Type.GT; case END: return boundInclusive[inclusiveBound.idx] ? 
Relation.Type.LTE : Relation.Type.LT; } throw new AssertionError(); } public IndexOperator getIndexOperator(Bound b) { switch (b) { case START: return boundInclusive[b.idx] ? IndexOperator.GTE : IndexOperator.GT; case END: return boundInclusive[b.idx] ? IndexOperator.LTE : IndexOperator.LT; } throw new AssertionError(); } public void setBound(ColumnIdentifier name, Relation.Type type, Term t) throws InvalidRequestException { Bound b = null; boolean inclusive = false; switch (type) { case GT: b = Bound.START; inclusive = false; break; case GTE: b = Bound.START; inclusive = true; break; case LT: b = Bound.END; inclusive = false; break; case LTE: b = Bound.END; inclusive = true; break; } if (bounds == null) throw new InvalidRequestException(String.format("%s cannot be restricted by both an equal and an inequal relation", name)); if (bounds[b.idx] != null) throw new InvalidRequestException(String.format("Invalid restrictions found on %s", name)); bounds[b.idx] = t; boundInclusive[b.idx] = inclusive; } @Override public String toString() { String s; if (eqValues == null) { s = String.format("SLICE(%s %s, %s %s)", boundInclusive[0] ? ">=" : ">", bounds[0], boundInclusive[1] ? "<=" : "<", bounds[1]); } else { s = String.format("EQ(%s)", eqValues); } return onToken ? s + "*" : s; } } public static class Parameters { private final int limit; private final Map<ColumnIdentifier, Boolean> orderings; private final boolean isCount; private final boolean allowFiltering; public Parameters(int limit, Map<ColumnIdentifier, Boolean> orderings, boolean isCount, boolean allowFiltering) { this.limit = limit; this.orderings = orderings; this.isCount = isCount; this.allowFiltering = allowFiltering; } } /** * Used in orderResults(...) 
method when single 'ORDER BY' condition where given */ private static class SingleColumnComparator implements Comparator<List<ByteBuffer>> { private final int index; private final AbstractType<?> comparator; public SingleColumnComparator(int columnIndex, AbstractType<?> orderer) { index = columnIndex; comparator = orderer; } public int compare(List<ByteBuffer> a, List<ByteBuffer> b) { return comparator.compare(a.get(index), b.get(index)); } } /** * Used in orderResults(...) method when multiple 'ORDER BY' conditions where given */ private static class CompositeComparator implements Comparator<List<ByteBuffer>> { private final List<AbstractType<?>> orderTypes; private final int[] positions; private CompositeComparator(List<AbstractType<?>> orderTypes, int[] positions) { this.orderTypes = orderTypes; this.positions = positions; } public int compare(List<ByteBuffer> a, List<ByteBuffer> b) { for (int i = 0; i < positions.length; i++) { AbstractType<?> type = orderTypes.get(i); int columnPos = positions[i]; ByteBuffer aValue = a.get(columnPos); ByteBuffer bValue = b.get(columnPos); int comparison = type.compare(aValue, bValue); if (comparison != 0) return comparison; } return 0; } } }
Sonnbc/modelCheckingCassandra
src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
Java
apache-2.0
65,426
/*
 * Copyright 2009-2013 by The Regents of the University of California
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * you may obtain a copy of the License from
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.asterix.dataflow.data.nontagged.printers;

import java.io.IOException;
import java.io.PrintStream;

import edu.uci.ics.hyracks.algebricks.common.exceptions.AlgebricksException;
import edu.uci.ics.hyracks.algebricks.data.IPrinter;
import edu.uci.ics.hyracks.algebricks.data.utils.WriteValueTools;
import edu.uci.ics.hyracks.data.std.primitive.ShortPointable;

/**
 * Prints a 16-bit integer value with no type-tag prefix: the short is decoded
 * from the byte buffer at the given offset and emitted as plain decimal text.
 */
public class ShortWithoutTypeInfoPrinter implements IPrinter {

    // Stateless, so a single shared instance suffices.
    public static final ShortWithoutTypeInfoPrinter INSTANCE = new ShortWithoutTypeInfoPrinter();

    @Override
    public void init() {
        // No state to initialize.
    }

    @Override
    public void print(byte[] b, int s, int l, PrintStream ps) throws AlgebricksException {
        // Decode the short stored at offset s, widen it, and write it out.
        short value = ShortPointable.getShort(b, s);
        try {
            WriteValueTools.writeInt((int) value, ps);
        } catch (IOException e) {
            throw new AlgebricksException(e);
        }
    }
}
sjaco002/incubator-asterixdb
asterix-om/src/main/java/edu/uci/ics/asterix/dataflow/data/nontagged/printers/ShortWithoutTypeInfoPrinter.java
Java
apache-2.0
1,538
package forms; /** * A form for the search widget. * * @author AJ * */ public class SearchFormData { /** The name to be searched for. */ public String name = ""; /** The gender of the player. */ public String type = ""; /** The country of the player. */ public String court = ""; /** The position of the player. */ public String position = ""; /**Empty constructor. */ public SearchFormData() { } }
andrewpw/HawaiiHoopsFinish
app/forms/SearchFormData.java
Java
apache-2.0
434
<?php

namespace Fungku\NetSuite\Classes;

/**
 * Search column for a "long" (integer) custom field.
 *
 * Extends the generic custom-field search column; the only specialization
 * here is declaring that the searched value is an integer.
 */
class SearchColumnLongCustomField extends SearchColumnCustomField {
    /** @var int The integer value carried by this search column. */
    public $searchValue;

    // Maps property names to their declared types — presumably consumed by
    // the (de)serialization layer; confirm against the library's base classes.
    static $paramtypesmap = array(
        "searchValue" => "integer",
    );
}
bitclaw/netsuite-php
src/Classes/SearchColumnLongCustomField.php
PHP
apache-2.0
201
package org.wikipedia.savedpages;

import android.content.Context;
import org.wikipedia.PageTitle;
import org.wikipedia.WikipediaApp;
import org.wikipedia.concurrency.SaneAsyncTask;
import org.wikipedia.page.Page;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;

/** Actual work to save a page for offline reading. */
public class SavePageTask extends SaneAsyncTask<Boolean> {
    private final WikipediaApp app;
    private final PageTitle title;
    private final Page page;

    // Counted down once per image download task (on success OR failure);
    // parallelDownload() blocks on it until every task has reported in.
    private CountDownLatch imagesDownloadedLatch;

    /**
     * @param context context used to reach the application object; only the
     *                application context is retained, so any context is safe
     * @param title   title of the page to save
     * @param page    parsed page content to write to disk
     */
    public SavePageTask(Context context, PageTitle title, Page page) {
        super(SINGLE_THREAD);
        app = (WikipediaApp) context.getApplicationContext();
        this.title = title;
        this.page = page;
    }

    /**
     * Persists the page content and its database record, then downloads all
     * images referenced by the page and writes the resulting URL map.
     *
     * @return true if every referenced image downloaded successfully; failed
     *         downloads are removed from {@code imageUrlMap} by
     *         parallelDownload(), so the size comparison detects them
     */
    @Override
    public Boolean performTask() throws Throwable {
        SavedPage savedPage = new SavedPage(title);
        savedPage.writeToFileSystem(page);

        SavedPagePersister persister = (SavedPagePersister) app.getPersister(SavedPage.class);
        persister.upsert(savedPage);

        final ImageUrlMap imageUrlMap = new ImageUrlMap.Builder(savedPage.getBaseDir()).extractUrls(page).build();
        // Snapshot the count before downloading: entries for failed downloads
        // are removed from the map, shrinking it.
        final int numImagesAttempts = imageUrlMap.size();
        parallelDownload(imageUrlMap);
        savedPage.writeUrlMap(imageUrlMap.toJSON());
        return numImagesAttempts == imageUrlMap.size();
    }

    /**
     * Fans out image download to multiple threads so this is faster.
     * Borrows from SHA ab2676f4732f186696ce37d02fefa3e6aee13042.
     *
     * Blocks until every task has completed (successfully or not). As a side
     * effect, entries whose download failed are removed from
     * {@code imageUrlMap}.
     *
     * @param imageUrlMap a Map with entries {source URL, file path} of images to be downloaded
     * @throws InterruptedException if interrupted while awaiting the latch
     */
    private void parallelDownload(final ImageUrlMap imageUrlMap) throws InterruptedException {
        imagesDownloadedLatch = new CountDownLatch(imageUrlMap.size());
        List<DownloadImageTask> tasks = new ArrayList<DownloadImageTask>();
        // instantiate the tasks first, then execute them all at once.
        // (so that removing URLs in onCatch doesn't mess with the iterator)
        for (Map.Entry<String, String> entry : imageUrlMap.entrySet()) {
            final String url = entry.getKey();
            final File file = new File(entry.getValue());
            tasks.add(new DownloadImageTask(app, url, file) {
                @Override
                public void onFinish(Boolean result) {
                    imagesDownloadedLatch.countDown();
                }

                @Override
                public void onCatch(Throwable caught) {
                    // TODO: Add retries
                    // An image failed to download, so exclude it from our URL Map
                    imageUrlMap.remove(url);
                    imagesDownloadedLatch.countDown();
                }
            });
        }
        for (DownloadImageTask task : tasks) {
            task.execute();
        }
        imagesDownloadedLatch.await();
    }
}
creaITve/apps-android-tbrc-works
wikipedia/src/main/java/org/wikipedia/savedpages/SavePageTask.java
Java
apache-2.0
3,027
# Description: Chef-Vault VaultDownload class
# Copyright 2014-15, Nordstrom, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

require_relative "vault_base"

class Chef
  class Knife
    # Writes the "file-content" entry of a vault item out to a local path.
    class VaultDownload < Knife
      include Chef::Knife::VaultBase

      banner "knife vault download VAULT ITEM PATH (options)"

      def run
        vault, item, path = @name_args[0], @name_args[1], @name_args[2]

        set_mode(config[:vault_mode])

        # All three positional arguments are required.
        return show_usage unless vault && item && path

        vault_item = ChefVault::Item.load(vault, item)
        File.write(path, vault_item["file-content"])
        ui.info("Saved #{vault_item["file-name"]} as #{path}")
      end
    end
  end
end
Nordstrom/chef-vault
lib/chef/knife/vault_download.rb
Ruby
apache-2.0
1,293
# # Author:: Nathan Cerny <ncerny@chef.io> # # Cookbook Name:: chef_stack # Resource:: wf_runner # # Copyright 2017 Chef Software Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. resource_name 'workflow_builder' default_action :create property :name, String, name_property: true property :channel, Symbol, default: :stable property :version, [String, Symbol], default: :latest property :pj_version, [String, Symbol], default: :latest property :accept_license, [TrueClass, FalseClass], default: false property :chef_user, String, default: 'workflow' property :chef_user_pem, String, required: true property :builder_pem, String, required: true property :chef_fqdn, String, default: URI.parse(Chef::Config['chef_server_url']).host property :automate_fqdn, String, required: true property :supermarket_fqdn, String property :job_dispatch_version, String, default: 'v2' property :automate_user, String, default: 'admin' property :automate_password, String property :automate_enterprise, String, default: 'chef' property :chef_config_path, String, default: '/etc/chef/client.rb' property :platform, String property :platform_version, String load_current_value do # node.run_state['chef-users'] ||= Mixlib::ShellOut.new('chef-server-ctl user-list').run_command.stdout # current_value_does_not_exist! 
unless node.run_state['chef-users'].index(/^#{username}$/) end action :create do chef_ingredient 'chefdk' do action :upgrade channel new_resource.channel version new_resource.version accept_license new_resource.accept_license platform new_resource.platform if new_resource.platform platform_version new_resource.platform_version if new_resource.platform_version end directory '/etc/chef/trusted_certs' do recursive true mode '0755' end [ new_resource.chef_fqdn, new_resource.automate_fqdn, new_resource.supermarket_fqdn, ].each do |server| execute "fetch ssl cert for #{server}" do command "knife ssl fetch -s https://#{server} -c #{Chef::Config['config_file']}" not_if "knife ssl check -s https://#{server} -c #{Chef::Config['config_file']}" ignore_failure true end end execute 'cat /etc/chef/trusted_certs/*.crt >> /opt/chefdk/embedded/ssl/certs/cacert.pem' ohai 'reload_passwd' do action :nothing plugin 'etc' ignore_failure true end workspace = '/var/opt/delivery/workspace' group 'dbuild' user 'dbuild' do home workspace group 'dbuild' notifies :reload, 'ohai[reload_passwd]', :immediately end %w(.chef bin lib etc).each do |dir| directory "#{workspace}/#{dir}" do mode '0755' owner 'dbuild' group 'dbuild' recursive true end end %w(etc .chef).each do |dir| chef_file "#{workspace}/#{dir}/builder_key" do source new_resource.builder_pem mode '0600' user 'root' group 'root' end chef_file "#{workspace}/#{dir}/#{chef_user}.pem" do source new_resource.chef_user_pem mode '0600' user 'root' group 'root' end end %w(etc/delivery.rb .chef/knife.rb).each do |dir| file "#{workspace}/#{dir}" do content ensurekv(::File.read(new_resource.chef_config_path), node_name: new_resource.chef_user, log_location: :STDOUT, client_key: "#{workspace}/#{dir}/#{new_resource.chef_user}.pem", trusted_certs_dir: '/etc/chef/trusted_certs') mode '0644' owner 'dbuild' group 'dbuild' end end remote_file "#{workspace}/bin/git_ssh" do source "https://#{automate_fqdn}/installer/git-ssh-wrapper" owner 'dbuild' group 
'dbuild' mode '0755' end remote_file "#{workspace}/bin/delivery-cmd" do source "https://#{automate_fqdn}/installer/delivery-cmd" owner 'root' group 'root' mode '0750' end file '/etc/chef/client.pem' do owner 'root' group 'dbuild' mode '0640' end file new_resource.chef_config_path do mode '0644' end Dir.glob('/etc/chef/trusted_certs/*').each do |fn| file fn do mode '0644' end end case new_resource.job_dispatch_version when 'v1' execute 'tag node as legacy build-node' do command "knife tag create #{Chef::Config['node_name']} delivery-build-node -c new_resource.chef_config_path" not_if { node['tags'].include?('delivery-build-node') } end directory '/var/log/push-jobs-client' do recursive true end chef_ingredient 'push-jobs-client' do version new_resource.pj_version end template '/etc/chef/push-jobs-client.rb' do source 'push-jobs-client.rb.erb' notifies :restart, 'service[push-jobs-client]', :delayed end if node['init_package'].eql?('systemd') init_template = 'push-jobs-client-systemd' init_file = '/etc/systemd/system/push-jobs-client.service' elsif node['platform_family'].eql?('debian') init_template = 'push-jobs-client-ubuntu-upstart' init_file = '/etc/init/push-jobs-client.conf' elsif node['platform_family'].eql?('rhel') init_template = 'push-jobs-client-rhel-6' init_file = '/etc/rc.d/init.d/push-jobs-client' else raise 'Unsupported platform for build node' end remote_file init_file do source "https://#{automate_fqdn}/installer/#{init_template}" mode '0755' notifies :restart, 'service[push-jobs-client]', :delayed end service 'push-jobs-client' do action [:enable, :start] end when 'v2' build_user = 'job_runner' home_dir = '/home/job_runner' execute 'tag node as job-runner' do command "knife tag create #{Chef::Config['node_name']} delivery-job-runner -c #{new_resource.chef_config_path}" not_if { node['tags'].include?('delivery-job-runner') } end user build_user do action [:create, :lock] home home_dir end directory home_dir do owner build_user group build_user end 
directory "#{home_dir}/.ssh" do owner build_user group build_user notifies :touch, "file[#{home_dir}/.ssh/authorized_keys]", :immediately end file "#{home_dir}/.ssh/authorized_keys" do action :nothing end # TODO: Figure out how to auto-detect enterprise ruby_block 'install job runner' do # ~FC014 block do ENV['AUTOMATE_PASSWORD'] = new_resource.automate_password Mixlib::ShellOut.new("delivery token \ -s #{new_resource.automate_fqdn} \ -e #{new_resource.automate_enterprise} \ -u #{new_resource.automate_user}").run_command data = { hostname: new_resource.name, os: node['os'], platform_family: node['platform_family'], platform: node['platform'], platform_version: node['platform_version'], } runner = Mixlib::ShellOut.new("delivery api post runners \ -d '#{data.to_json}' \ -s #{new_resource.automate_fqdn} \ -e #{new_resource.automate_enterprise} \ -u #{new_resource.automate_user}").run_command ::File.write(::File.join(home_dir, '.ssh/authorized_keys'), JSON.parse(runner.stdout)['openssh_public_key']) end not_if { ::File.read(::File.join(home_dir, '.ssh/authorized_keys')).include?("#{build_user}@#{node['fqdn']}") } end file ::File.join('/etc/sudoers.d', build_user) do content <<-EOF #{build_user} ALL=(root) NOPASSWD:/usr/local/bin/delivery-cmd, /bin/ls Defaults:#{build_user} !requiretty Defaults:#{build_user} secure_path = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin EOF mode '0440' end directory ::File.join(home_dir, '.ssh') do owner build_user group build_user mode '0700' end file ::File.join(home_dir, '.ssh/authorized_keys') do owner build_user group build_user mode '0600' end file '/usr/local/bin/delivery-cmd' do content lazy { ::File.read('/var/opt/delivery/workspace/bin/delivery-cmd') } owner 'dbuild' group 'dbuild' mode '0755' end else raise 'Invalid Runner Version' end end action_class.class_eval do include ChefIngredientCookbook::Helpers end
vinyar/chef-ingredient
resources/wf_builder.rb
Ruby
apache-2.0
8,768
<?php class AuthAuthorizationProviderTest extends PHPUnit_Framework_TestCase { /** * @expectedException \Exception */ public function testValidatingAuthorizationHeaderFailsWhenInvalidAndThrowsException() { $provider = new AuthorizationProviderStub; $request = Illuminate\Http\Request::create('/', 'GET'); $request->headers->set('authorization', 'bar'); $provider->validateAuthorizationHeader($request); } public function testValidatingAuthorizationHeaderSucceedsAndReturnsNull() { $provider = new AuthorizationProviderStub; $request = Illuminate\Http\Request::create('/', 'GET'); $request->headers->set('authorization', 'foo'); $this->assertNull($provider->validateAuthorizationHeader($request)); } }
prady00/Laravel-Swagger-REST
vendor/dingo/api/tests/AuthAuthorizationProviderTest.php
PHP
apache-2.0
735
# # Author:: Adam Jacob (<adam@chef.io>) # Author:: Seth Chisamore (<schisamo@chef.io>) # Author:: Tyler Cloke (<tyler@chef.io>) # Copyright:: Copyright 2008-2016, Chef Software Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # require "chef/resource/file" require "chef/mixin/securable" class Chef class Resource # A cookbook template is an Embedded Ruby (ERB) template that is used to dynamically generate static text files. # Templates may contain Ruby expressions and statements, and are a great way to manage configuration files. Use the # template resource to add cookbook templates to recipes; place the corresponding Embedded Ruby (ERB) template file # in a cookbook’s /templates directory. # # Use the template resource to manage the contents of a file using an Embedded Ruby (ERB) template by transferring # files from a sub-directory of COOKBOOK_NAME/templates/ to a specified path located on a host that is running the # chef-client. This resource includes actions and properties from the file resource. Template files managed by the # template resource follow the same file specificity rules as the remote_file and file resources. 
class Template < Chef::Resource::File resource_name :template provides :template include Chef::Mixin::Securable attr_reader :inline_helper_blocks attr_reader :inline_helper_modules def initialize(name, run_context = nil) super @source = "#{::File.basename(name)}.erb" @inline_helper_blocks = {} @inline_helper_modules = [] @helper_modules = [] end def source(file = nil) set_or_return( :source, file, :kind_of => [ String, Array ] ) end property :variables, Hash, default: lazy { Hash.new } property :cookbook, String property :local, [ TrueClass, FalseClass ], default: false # Declares a helper method to be defined in the template context when # rendering. # # === Example: # # ==== Basic usage: # Given the following helper: # helper(:static_value) { "hello from helper" } # A template with the following code: # <%= static_value %> # Will render as; # hello from helper # # ==== Referencing Instance Variables: # Any instance variables available to the template can be referenced in # the method body. For example, you can simplify accessing app-specific # node attributes like this: # helper(:app) { @node[:my_app_attributes] } # And use it in a template like this: # <%= app[:listen_ports] %> # This is equivalent to the non-helper template code: # <%= @node[:my_app_attributes][:listen_ports] %> # # ==== Method Arguments: # Helper methods can also take arguments. The syntax available for # argument specification supports full syntax available for method # definition. # # Continuing the above example of simplifying attribute access, we can # define a helper to look up app-specific attributes like this: # helper(:app) { |setting| @node[:my_app_attributes][setting] } # The template can then look up attributes like this: # <%= app(:listen_ports) %> def helper(method_name, &block) unless block_given? 
raise Exceptions::ValidationFailed, "`helper(:method)` requires a block argument (e.g., `helper(:method) { code }`)" end unless method_name.kind_of?(Symbol) raise Exceptions::ValidationFailed, "method_name argument to `helper(method_name)` must be a symbol (e.g., `helper(:method) { code }`)" end @inline_helper_blocks[method_name] = block end # Declares a module to define helper methods in the template's context # when rendering. There are two primary forms. # # === Inline Module Definition # When a block is given, the block is used to define a module which is # then mixed in to the template context w/ `extend`. # # ==== Inline Module Example # Given the following code in the template resource: # helpers do # # Add "syntax sugar" for referencing app-specific attributes # def app(attribute) # @node[:my_app_attributes][attribute] # end # end # You can use it in the template like so: # <%= app(:listen_ports) %> # Which is equivalent to: # <%= @node[:my_app_attributes][:listen_ports] %> # # === External Module Form # When a module name is given, the template context will be extended with # that module. This is the recommended way to customize template contexts # when you need to define more than an handful of helper functions (but # also try to keep your template helpers from getting out of hand--if you # have very complex logic in your template helpers, you should further # extract your code into separate libraries). # # ==== External Module Example # To extract the above inline module code to a library, you'd create a # library file like this: # module MyTemplateHelper # # Add "syntax sugar" for referencing app-specific attributes # def app(attribute) # @node[:my_app_attributes][attribute] # end # end # And in the template resource: # helpers(MyTemplateHelper) # The template code in the above example will work unmodified. def helpers(module_name = nil, &block) if block_given? && !module_name.nil? 
raise Exceptions::ValidationFailed, "Passing both a module and block to #helpers is not supported. Call #helpers multiple times instead" elsif block_given? @inline_helper_modules << block elsif module_name.kind_of?(::Module) @helper_modules << module_name elsif module_name.nil? raise Exceptions::ValidationFailed, "#helpers requires either a module name or inline module code as a block.\n" + "e.g.: helpers do; helper_code; end;\n" + "OR: helpers(MyHelpersModule)" else raise Exceptions::ValidationFailed, "Argument to #helpers must be a module. You gave #{module_name.inspect} (#{module_name.class})" end end # Compiles all helpers from inline method definitions, inline module # definitions, and external modules into an Array of Modules. The context # object for the template is extended with these modules to provide # per-resource template logic. def helper_modules compiled_helper_methods + compiled_helper_modules + @helper_modules end private # compiles helper methods into a module that can be included in template context def compiled_helper_methods if inline_helper_blocks.empty? [] else resource_helper_blocks = inline_helper_blocks helper_mod = Module.new do resource_helper_blocks.each do |method_name, method_body| define_method(method_name, &method_body) end end [ helper_mod ] end end def compiled_helper_modules @inline_helper_modules.map do |module_body| Module.new(&module_body) end end end end end
juliandunn/chef
lib/chef/resource/template.rb
Ruby
apache-2.0
8,061
var orig = { from: 20, to: 500 }; var board = tnt.board().from(orig.from).to(orig.to); // reload button var reload = d3.select(yourDiv) .append("button") .text("reset") .style("margin", "10px") .on("click", function () { board.from(orig.from); board.to(orig.to); board.start(); }); var axis_track = tnt.board.track() .height(0) .color("white") .display(tnt.board.track.feature.axis() .orientation("top") ); var pin_track = tnt.board.track() .height(60) .color("white") .display(tnt.board.track.feature.pin() .domain([0.3, 1.2]) .color("red") .on("click", function (d) { console.log(d); }) .on("mouseover", function (d) { console.log("mouseover"); }) ) .data(tnt.board.track.data.sync() .retriever (function () { return [ { pos : 200, val : 0.5, label : "1" }, { pos : 355, val : 0.8, label : "2" }, { pos : 100, val : 0.3, label : "3" }, { pos : 400, val : 1, label : "4" } ]; }) ); board .add_track(axis_track) .add_track(pin_track); board(yourDiv); board.start();
tntvis/tnt.board
snippets/reload.js
JavaScript
apache-2.0
1,560
# Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'faraday' require 'multi_json' require 'compat/multi_json' require 'stringio' require 'google/api_client/version' require 'google/api_client/logging' require 'google/api_client/errors' require 'google/api_client/environment' require 'google/api_client/discovery' require 'google/api_client/request' require 'google/api_client/reference' require 'google/api_client/result' require 'google/api_client/media' require 'google/api_client/service_account' require 'google/api_client/batch' require 'google/api_client/gzip' require 'google/api_client/client_secrets' require 'google/api_client/railtie' if defined?(Rails::Railtie) module Google ## # This class manages APIs communication. class APIClient include Google::APIClient::Logging ## # Creates a new Google API client. # # @param [Hash] options The configuration parameters for the client. # @option options [Symbol, #generate_authenticated_request] :authorization # (:oauth_1) # The authorization mechanism used by the client. The following # mechanisms are supported out-of-the-box: # <ul> # <li><code>:two_legged_oauth_1</code></li> # <li><code>:oauth_1</code></li> # <li><code>:oauth_2</code></li> # </ul> # @option options [Boolean] :auto_refresh_token (true) # The setting that controls whether or not the api client attempts to # refresh authorization when a 401 is hit in #execute. If the token does # not support it, this option is ignored. 
# @option options [String] :application_name # The name of the application using the client. # @option options [String] :application_version # The version number of the application using the client. # @option options [String] :user_agent # ("{app_name} google-api-ruby-client/{version} {os_name}/{os_version}") # The user agent used by the client. Most developers will want to # leave this value alone and use the `:application_name` option instead. # @option options [String] :host ("www.googleapis.com") # The API hostname used by the client. This rarely needs to be changed. # @option options [String] :port (443) # The port number used by the client. This rarely needs to be changed. # @option options [String] :discovery_path ("/discovery/v1") # The discovery base path. This rarely needs to be changed. # @option options [String] :ca_file # Optional set of root certificates to use when validating SSL connections. # By default, a bundled set of trusted roots will be used. def initialize(options={}) logger.debug { "#{self.class} - Initializing client with options #{options}" } # Normalize key to String to allow indifferent access. options = options.inject({}) do |accu, (key, value)| accu[key.to_sym] = value accu end # Almost all API usage will have a host of 'www.googleapis.com'. self.host = options[:host] || 'www.googleapis.com' self.port = options[:port] || 443 self.discovery_path = options[:discovery_path] || '/discovery/v1' # Most developers will want to leave this value alone and use the # application_name option. 
if options[:application_name] app_name = options[:application_name] app_version = options[:application_version] application_string = "#{app_name}/#{app_version || '0.0.0'}" else logger.warn { "#{self.class} - Please provide :application_name and :application_version when initializing the client" } end self.user_agent = options[:user_agent] || ( "#{application_string} " + "google-api-ruby-client/#{Google::APIClient::VERSION::STRING} #{ENV::OS_VERSION} (gzip)" ).strip # The writer method understands a few Symbols and will generate useful # default authentication mechanisms. self.authorization = options.key?(:authorization) ? options[:authorization] : :oauth_2 self.auto_refresh_token = options.fetch(:auto_refresh_token) { true } self.key = options[:key] self.user_ip = options[:user_ip] @discovery_uris = {} @discovery_documents = {} @discovered_apis = {} ca_file = options[:ca_file] || File.expand_path('../../cacerts.pem', __FILE__) self.connection = Faraday.new do |faraday| faraday.response :gzip faraday.options.params_encoder = Faraday::FlatParamsEncoder faraday.ssl.ca_file = ca_file faraday.ssl.verify = true faraday.adapter Faraday.default_adapter end return self end ## # Returns the authorization mechanism used by the client. # # @return [#generate_authenticated_request] The authorization mechanism. attr_reader :authorization ## # Sets the authorization mechanism used by the client. # # @param [#generate_authenticated_request] new_authorization # The new authorization mechanism. 
def authorization=(new_authorization) case new_authorization when :oauth_1, :oauth require 'signet/oauth_1/client' # NOTE: Do not rely on this default value, as it may change new_authorization = Signet::OAuth1::Client.new( :temporary_credential_uri => 'https://www.google.com/accounts/OAuthGetRequestToken', :authorization_uri => 'https://www.google.com/accounts/OAuthAuthorizeToken', :token_credential_uri => 'https://www.google.com/accounts/OAuthGetAccessToken', :client_credential_key => 'anonymous', :client_credential_secret => 'anonymous' ) when :two_legged_oauth_1, :two_legged_oauth require 'signet/oauth_1/client' # NOTE: Do not rely on this default value, as it may change new_authorization = Signet::OAuth1::Client.new( :client_credential_key => nil, :client_credential_secret => nil, :two_legged => true ) when :oauth_2 require 'signet/oauth_2/client' # NOTE: Do not rely on this default value, as it may change new_authorization = Signet::OAuth2::Client.new( :authorization_uri => 'https://accounts.google.com/o/oauth2/auth', :token_credential_uri => 'https://accounts.google.com/o/oauth2/token' ) when nil # No authorization mechanism else if !new_authorization.respond_to?(:generate_authenticated_request) raise TypeError, 'Expected authorization mechanism to respond to ' + '#generate_authenticated_request.' end end @authorization = new_authorization return @authorization end ## # Default Faraday/HTTP connection. # # @return [Faraday::Connection] attr_accessor :connection ## # The setting that controls whether or not the api client attempts to # refresh authorization when a 401 is hit in #execute. # # @return [Boolean] attr_accessor :auto_refresh_token ## # The application's API key issued by the API console. # # @return [String] The API key. attr_accessor :key ## # The IP address of the user this request is being performed on behalf of. # # @return [String] The user's IP address. attr_accessor :user_ip ## # The user agent used by the client. 
# # @return [String] # The user agent string used in the User-Agent header. attr_accessor :user_agent ## # The API hostname used by the client. # # @return [String] # The API hostname. Should almost always be 'www.googleapis.com'. attr_accessor :host ## # The port number used by the client. # # @return [String] # The port number. Should almost always be 443. attr_accessor :port ## # The base path used by the client for discovery. # # @return [String] # The base path. Should almost always be '/discovery/v1'. attr_accessor :discovery_path ## # Returns the URI for the directory document. # # @return [Addressable::URI] The URI of the directory document. def directory_uri return resolve_uri(self.discovery_path + '/apis') end ## # Manually registers a URI as a discovery document for a specific version # of an API. # # @param [String, Symbol] api The API name. # @param [String] version The desired version of the API. # @param [Addressable::URI] uri The URI of the discovery document. def register_discovery_uri(api, version, uri) api = api.to_s version = version || 'v1' @discovery_uris["#{api}:#{version}"] = uri end ## # Returns the URI for the discovery document. # # @param [String, Symbol] api The API name. # @param [String] version The desired version of the API. # @return [Addressable::URI] The URI of the discovery document. def discovery_uri(api, version=nil) api = api.to_s version = version || 'v1' return @discovery_uris["#{api}:#{version}"] ||= ( resolve_uri( self.discovery_path + '/apis/{api}/{version}/rest', 'api' => api, 'version' => version ) ) end ## # Manually registers a pre-loaded discovery document for a specific version # of an API. # # @param [String, Symbol] api The API name. # @param [String] version The desired version of the API. # @param [String, StringIO] discovery_document # The contents of the discovery document. 
def register_discovery_document(api, version, discovery_document) api = api.to_s version = version || 'v1' if discovery_document.kind_of?(StringIO) discovery_document.rewind discovery_document = discovery_document.string elsif discovery_document.respond_to?(:to_str) discovery_document = discovery_document.to_str else raise TypeError, "Expected String or StringIO, got #{discovery_document.class}." end @discovery_documents["#{api}:#{version}"] = MultiJson.load(discovery_document) end ## # Returns the parsed directory document. # # @return [Hash] The parsed JSON from the directory document. def directory_document return @directory_document ||= (begin response = self.execute!( :http_method => :get, :uri => self.directory_uri, :authenticated => false ) response.data end) end ## # Returns the parsed discovery document. # # @param [String, Symbol] api The API name. # @param [String] version The desired version of the API. # @return [Hash] The parsed JSON from the discovery document. def discovery_document(api, version=nil) api = api.to_s version = version || 'v1' return @discovery_documents["#{api}:#{version}"] ||= (begin response = self.execute!( :http_method => :get, :uri => self.discovery_uri(api, version), :authenticated => false ) response.data end) end ## # Returns all APIs published in the directory document. # # @return [Array] The list of available APIs. def discovered_apis @directory_apis ||= (begin document_base = self.directory_uri if self.directory_document && self.directory_document['items'] self.directory_document['items'].map do |discovery_document| Google::APIClient::API.new( document_base, discovery_document ) end else [] end end) end ## # Returns the service object for a given service name and service version. # # @param [String, Symbol] api The API name. # @param [String] version The desired version of the API. # # @return [Google::APIClient::API] The service object. 
def discovered_api(api, version=nil) if !api.kind_of?(String) && !api.kind_of?(Symbol) raise TypeError, "Expected String or Symbol, got #{api.class}." end api = api.to_s version = version || 'v1' return @discovered_apis["#{api}:#{version}"] ||= begin document_base = self.discovery_uri(api, version) discovery_document = self.discovery_document(api, version) if document_base && discovery_document Google::APIClient::API.new( document_base, discovery_document ) else nil end end end ## # Returns the method object for a given RPC name and service version. # # @param [String, Symbol] rpc_name The RPC name of the desired method. # @param [String, Symbol] api The API the method is within. # @param [String] version The desired version of the API. # # @return [Google::APIClient::Method] The method object. def discovered_method(rpc_name, api, version=nil) if !rpc_name.kind_of?(String) && !rpc_name.kind_of?(Symbol) raise TypeError, "Expected String or Symbol, got #{rpc_name.class}." end rpc_name = rpc_name.to_s api = api.to_s version = version || 'v1' service = self.discovered_api(api, version) if service.to_h[rpc_name] return service.to_h[rpc_name] else return nil end end ## # Returns the service object with the highest version number. # # @note <em>Warning</em>: This method should be used with great care. # As APIs are updated, minor differences between versions may cause # incompatibilities. Requesting a specific version will avoid this issue. # # @param [String, Symbol] api The name of the service. # # @return [Google::APIClient::API] The service object. def preferred_version(api) if !api.kind_of?(String) && !api.kind_of?(Symbol) raise TypeError, "Expected String or Symbol, got #{api.class}." end api = api.to_s return self.discovered_apis.detect do |a| a.name == api && a.preferred == true end end ## # Verifies an ID token against a server certificate. Used to ensure that # an ID token supplied by an untrusted client-side mechanism is valid. 
# Raises an error if the token is invalid or missing. def verify_id_token! require 'jwt' require 'openssl' @certificates ||= {} if !self.authorization.respond_to?(:id_token) raise ArgumentError, ( "Current authorization mechanism does not support ID tokens: " + "#{self.authorization.class.to_s}" ) elsif !self.authorization.id_token raise ArgumentError, ( "Could not verify ID token, ID token missing. " + "Scopes were: #{self.authorization.scope.inspect}" ) else check_cached_certs = lambda do valid = false for key, cert in @certificates begin self.authorization.decoded_id_token(cert.public_key) valid = true rescue JWT::DecodeError, Signet::UnsafeOperationError # Expected exception. Ignore, ID token has not been validated. end end valid end if check_cached_certs.call() return true end response = self.execute!( :http_method => :get, :uri => 'https://www.googleapis.com/oauth2/v1/certs', :authenticated => false ) @certificates.merge!( Hash[MultiJson.load(response.body).map do |key, cert| [key, OpenSSL::X509::Certificate.new(cert)] end] ) if check_cached_certs.call() return true else raise InvalidIDTokenError, "Could not verify ID token against any available certificate." end end return nil end ## # Generates a request. # # @option options [Google::APIClient::Method] :api_method # The method object or the RPC name of the method being executed. # @option options [Hash, Array] :parameters # The parameters to send to the method. # @option options [Hash, Array] :headers The HTTP headers for the request. # @option options [String] :body The body of the request. # @option options [String] :version ("v1") # The service version. Only used if `api_method` is a `String`. # @option options [#generate_authenticated_request] :authorization # The authorization mechanism for the response. Used only if # `:authenticated` is `true`. # @option options [TrueClass, FalseClass] :authenticated (true) # `true` if the request must be signed or somehow # authenticated, `false` otherwise. 
# # @return [Google::APIClient::Reference] The generated request. # # @example # request = client.generate_request( # :api_method => 'plus.activities.list', # :parameters => # {'collection' => 'public', 'userId' => 'me'} # ) def generate_request(options={}) options = { :api_client => self }.merge(options) return Google::APIClient::Request.new(options) end ## # Executes a request, wrapping it in a Result object. # # @param [Google::APIClient::Request, Hash, Array] params # Either a Google::APIClient::Request, a Hash, or an Array. # # If a Google::APIClient::Request, no other parameters are expected. # # If a Hash, the below parameters are handled. If an Array, the # parameters are assumed to be in the below order: # # - (Google::APIClient::Method) api_method: # The method object or the RPC name of the method being executed. # - (Hash, Array) parameters: # The parameters to send to the method. # - (String) body: The body of the request. # - (Hash, Array) headers: The HTTP headers for the request. # - (Hash) options: A set of options for the request, of which: # - (#generate_authenticated_request) :authorization (default: true) - # The authorization mechanism for the response. Used only if # `:authenticated` is `true`. # - (TrueClass, FalseClass) :authenticated (default: true) - # `true` if the request must be signed or somehow # authenticated, `false` otherwise. # - (TrueClass, FalseClass) :gzip (default: true) - # `true` if gzip enabled, `false` otherwise. # # @return [Google::APIClient::Result] The result from the API, nil if batch. 
# # @example # result = client.execute(batch_request) # # @example # plus = client.discovered_api('plus') # result = client.execute( # :api_method => plus.activities.list, # :parameters => {'collection' => 'public', 'userId' => 'me'} # ) # # @see Google::APIClient#generate_request def execute(*params) if params.first.kind_of?(Google::APIClient::Request) request = params.shift options = params.shift || {} else # This block of code allows us to accept multiple parameter passing # styles, and maintaining some backwards compatibility. # # Note: I'm extremely tempted to deprecate this style of execute call. if params.last.respond_to?(:to_hash) && params.size == 1 options = params.pop else options = {} end options[:api_method] = params.shift if params.size > 0 options[:parameters] = params.shift if params.size > 0 options[:body] = params.shift if params.size > 0 options[:headers] = params.shift if params.size > 0 options.update(params.shift) if params.size > 0 request = self.generate_request(options) end request.headers['User-Agent'] ||= '' + self.user_agent unless self.user_agent.nil? request.headers['Accept-Encoding'] ||= 'gzip' unless options[:gzip] == false request.parameters['key'] ||= self.key unless self.key.nil? request.parameters['userIp'] ||= self.user_ip unless self.user_ip.nil? connection = options[:connection] || self.connection request.authorization = options[:authorization] || self.authorization unless options[:authenticated] == false result = request.send(connection) if result.status == 401 && request.authorization.respond_to?(:refresh_token) && auto_refresh_token begin logger.debug("Attempting refresh of access token & retry of request") request.authorization.fetch_access_token! result = request.send(connection, true) rescue Signet::AuthorizationError # Ignore since we want the original error end end return result end ## # Same as Google::APIClient#execute, but raises an exception if there was # an error. 
# # @see Google::APIClient#execute def execute!(*params) result = self.execute(*params) if result.error? error_message = result.error_message case result.response.status when 400...500 exception_type = ClientError error_message ||= "A client error has occurred." when 500...600 exception_type = ServerError error_message ||= "A server error has occurred." else exception_type = TransmissionError error_message ||= "A transmission error has occurred." end raise exception_type, error_message end return result end protected ## # Resolves a URI template against the client's configured base. # # @api private # @param [String, Addressable::URI, Addressable::Template] template # The template to resolve. # @param [Hash] mapping The mapping that corresponds to the template. # @return [Addressable::URI] The expanded URI. def resolve_uri(template, mapping={}) @base_uri ||= Addressable::URI.new( :scheme => 'https', :host => self.host, :port => self.port ).normalize template = if template.kind_of?(Addressable::Template) template.pattern elsif template.respond_to?(:to_str) template.to_str else raise TypeError, "Expected String, Addressable::URI, or Addressable::Template, " + "got #{template.class}." end return Addressable::Template.new(@base_uri + template).expand(mapping) end end end require 'google/api_client/version'
ip2k/google-api-ruby-client
lib/google/api_client.rb
Ruby
apache-2.0
23,326
using Lucene.Net.QueryParsers.Flexible.Core.Messages;
using Lucene.Net.QueryParsers.Flexible.Core.Parser;
using System.Collections.Generic;

namespace Lucene.Net.QueryParsers.Flexible.Core.Nodes
{
    /*
     * Licensed to the Apache Software Foundation (ASF) under one or more
     * contributor license agreements.  See the NOTICE file distributed with
     * this work for additional information regarding copyright ownership.
     * The ASF licenses this file to You under the Apache License, Version 2.0
     * (the "License"); you may not use this file except in compliance with
     * the License.  You may obtain a copy of the License at
     *
     *     http://www.apache.org/licenses/LICENSE-2.0
     *
     * Unless required by applicable law or agreed to in writing, software
     * distributed under the License is distributed on an "AS IS" BASIS,
     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     * See the License for the specific language governing permissions and
     * limitations under the License.
     */

    /// <summary>
    /// A <see cref="BoostQueryNode"/> boosts the QueryNode tree which is under this node.
    /// So, it must only and always have one child.
    ///
    /// The boost value may vary from 0.0 to 1.0.
    /// </summary>
    public class BoostQueryNode : QueryNode
    {
        // Boost factor applied to the single child of this node.
        private float value = 0;

        /// <summary>
        /// Constructs a boost node
        /// </summary>
        /// <param name="query">the query to be boosted</param>
        /// <param name="value">the boost value, it may vary from 0.0 to 1.0</param>
        public BoostQueryNode(IQueryNode query, float value)
        {
            // LUCENENET: Factored out NLS/Message/IMessage so end users can optionally utilize the built-in .NET localization.
            // LUCENENET: Added paramName parameter and changed to the same error message as the default of ArgumentNullException.
            // However, we still need this to be an error type so it is not caught in StandardSyntaxParser.
            if (query is null)
                throw new QueryNodeError(QueryParserMessages.ARGUMENT_CANNOT_BE_NULL, nameof(query));

            this.value = value;
            IsLeaf = false;
            Allocate();
            // The boosted query is the one and only child of this node.
            Add(query);
        }

        /// <summary>
        /// Gets the single child which this node boosts.
        /// </summary>
        public virtual IQueryNode Child
        {
            get
            {
                IList<IQueryNode> children = GetChildren();
                if (children is null || children.Count == 0)
                {
                    return null;
                }
                return children[0];
            }
        }

        /// <summary>
        /// Gets the boost value. It may vary from 0.0 to 1.0.
        /// </summary>
        public virtual float Value => this.value;

        /// <summary>
        /// Returns the boost value parsed to a string. Whole-number boosts are
        /// rendered without a fractional part (e.g. "2" rather than "2.0").
        /// </summary>
        /// <returns>the parsed value</returns>
        private string GetValueString()
        {
            float f = this.value;
            if (f == (long)f)
                return "" + (long)f;
            else
                // Fix for the former "LUCENENET TODO: Culture": format with the
                // invariant culture so the boost always uses a '.' decimal
                // separator, as required by the query syntax, regardless of the
                // current thread culture (e.g. de-DE would otherwise emit ',').
                return "" + f.ToString("0.0#######", System.Globalization.CultureInfo.InvariantCulture);
        }

        public override string ToString()
        {
            return "<boost value='" + GetValueString() + "'>" + "\n"
                + Child.ToString() + "\n</boost>";
        }

        public override string ToQueryString(IEscapeQuerySyntax escapeSyntaxParser)
        {
            if (Child is null)
                return "";
            return Child.ToQueryString(escapeSyntaxParser) + "^" + GetValueString();
        }

        public override IQueryNode CloneTree()
        {
            BoostQueryNode clone = (BoostQueryNode)base.CloneTree();
            clone.value = this.value;
            return clone;
        }
    }
}
apache/lucenenet
src/Lucene.Net.QueryParser/Flexible/Core/Nodes/BoostQueryNode.cs
C#
apache-2.0
3,972
'use strict'; module.exports = { get: 'SENSOR_MULTILEVEL_GET', getParser: () => ({ 'Sensor Type': 'Power (version 2)', Properties1: { Scale: 0, }, }), report: 'SENSOR_MULTILEVEL_REPORT', reportParser: report => { if (report && report.hasOwnProperty('Sensor Type') && report.hasOwnProperty('Sensor Value (Parsed)')) { if (report['Sensor Type'] === 'Power (version 2)') return report['Sensor Value (Parsed)']; } return null; }, };
timkouters/nl.timkouters.domitech
node_modules/homey-meshdriver/lib/zwave/system/capabilities/measure_power/SENSOR_MULTILEVEL.js
JavaScript
apache-2.0
454
<?php /* * Copyright 2014 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ namespace Google\Service\CloudVideoIntelligence; class GoogleCloudVideointelligenceV1p2beta1DetectedLandmark extends \Google\Model { /** * @var float */ public $confidence; /** * @var string */ public $name; protected $pointType = GoogleCloudVideointelligenceV1p2beta1NormalizedVertex::class; protected $pointDataType = ''; /** * @param float */ public function setConfidence($confidence) { $this->confidence = $confidence; } /** * @return float */ public function getConfidence() { return $this->confidence; } /** * @param string */ public function setName($name) { $this->name = $name; } /** * @return string */ public function getName() { return $this->name; } /** * @param GoogleCloudVideointelligenceV1p2beta1NormalizedVertex */ public function setPoint(GoogleCloudVideointelligenceV1p2beta1NormalizedVertex $point) { $this->point = $point; } /** * @return GoogleCloudVideointelligenceV1p2beta1NormalizedVertex */ public function getPoint() { return $this->point; } } // Adding a class alias for backwards compatibility with the previous class name. class_alias(GoogleCloudVideointelligenceV1p2beta1DetectedLandmark::class, 'Google_Service_CloudVideoIntelligence_GoogleCloudVideointelligenceV1p2beta1DetectedLandmark');
googleapis/google-api-php-client-services
src/CloudVideoIntelligence/GoogleCloudVideointelligenceV1p2beta1DetectedLandmark.php
PHP
apache-2.0
1,958
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from airflow import models import internal_unit_testing import pytest @pytest.fixture(autouse=True, scope="function") def set_variables(airflow_database): models.Variable.set("bucket_path", "gs://example_bucket") models.Variable.set("project_id", "example-project") models.Variable.set("gce_zone", "us-central1-f") yield models.Variable.delete('bucket_path') models.Variable.delete('project_id') models.Variable.delete('gce_zone') def test_dag_import(): """Test that the DAG file can be successfully imported. This tests that the DAG can be parsed, but does not run it in an Airflow environment. This is a recommended confidence check by the official Airflow docs: https://airflow.incubator.apache.org/tutorial.html#testing """ from . import dataflowtemplateoperator_tutorial as module internal_unit_testing.assert_has_valid_dag(module)
GoogleCloudPlatform/python-docs-samples
composer/workflows/dataflowtemplateoperator_tutorial_test.py
Python
apache-2.0
1,477
require 'spec_helper'

# Catalog tests for the quantum::plugins::ovs manifest: verifies the OVS
# plugin INI settings and package resource for vlan/gre/flat tenant network
# types on both Debian and RedHat platforms.
describe 'quantum::plugins::ovs' do

  # Minimal quantum base class required for the plugin class to compile.
  let :pre_condition do
    "class { 'quantum': rabbit_password => 'passw0rd' }"
  end

  # Defaults mirrored from the class parameters; individual contexts below
  # override :params to exercise non-default tenant network types.
  let :default_params do
    {
      :package_ensure      => 'present',
      :sql_connection      => 'sqlite:////var/lib/quantum/ovs.sqlite',
      :sql_max_retries     => 10,
      :reconnect_interval  => 2,
      :tenant_network_type => 'vlan',
      :network_vlan_ranges => 'physnet1:1000:2000',
      :tunnel_id_ranges    => '1:1000'
    }
  end

  let :params do
    {}
  end

  # Platform-independent assertions; each platform context mixes these in
  # with its own platform_params.
  shared_examples_for 'quantum ovs plugin' do

    # Effective parameters: defaults merged with the context's overrides.
    let :p do
      default_params.merge(params)
    end

    it 'should perform default configuration of' do
      # NOTE(review): the OVS/network_vlan_ranges assertion appears twice in
      # this example (first and second-to-last line) — the duplicate looks
      # unintentional, though it is harmless.
      should contain_quantum_plugin_ovs('OVS/network_vlan_ranges').\
        with_value(p[:network_vlan_ranges])
      should contain_quantum_plugin_ovs('DATABASE/sql_connection').with_value(p[:sql_connection])
      should contain_quantum_plugin_ovs('DATABASE/sql_max_retries').with_value(p[:sql_max_retries])
      should contain_quantum_plugin_ovs('DATABASE/reconnect_interval').with_value(p[:reconnect_interval])
      should contain_quantum_plugin_ovs('OVS/tenant_network_type').with_value(p[:tenant_network_type])
      # tunnel_id_ranges is only relevant for gre tunneling (see context below).
      should_not contain_quantum_plugin_ovs('OVS/tunnel_id_ranges')
      should contain_quantum_plugin_ovs('OVS/network_vlan_ranges').with_value(p[:network_vlan_ranges])
      should contain_package('quantum-plugin-ovs').with(
        :name   => platform_params[:ovs_server_package],
        :ensure => p[:package_ensure]
      )
    end

    context 'with gre tunneling' do
      let :params do
        { :tenant_network_type => 'gre' }
      end

      it 'should perform gre network configuration' do
        # With gre, vlan ranges are removed and tunnel id ranges are set.
        should contain_quantum_plugin_ovs('OVS/network_vlan_ranges').with_ensure('absent')
        should contain_quantum_plugin_ovs('OVS/tenant_network_type').with_value(p[:tenant_network_type])
        should contain_quantum_plugin_ovs('OVS/tunnel_id_ranges').with_value(p[:tunnel_id_ranges])
      end
    end

    context 'with a flat network' do
      let :params do
        { :tenant_network_type => 'flat' }
      end

      it { should contain_quantum_plugin_ovs('OVS/network_vlan_ranges').with_value(p[:network_vlan_ranges]) }
    end

  end

  context 'on Debian platforms' do
    let :facts do
      { :osfamily => 'Debian' }
    end

    let :platform_params do
      { :ovs_server_package => 'quantum-plugin-openvswitch' }
    end

    it { should contain_class('quantum::plugins::ovs') }
    it_configures 'quantum ovs plugin'
  end

  context 'on RedHat platforms' do
    let :facts do
      { :osfamily => 'RedHat' }
    end

    let :platform_params do
      { :ovs_server_package => 'openstack-quantum-openvswitch' }
    end

    it { should contain_class('quantum::plugins::ovs') }

    it 'should perform redhat specific configuration' do
      # RedHat packaging expects /etc/quantum/plugin.ini to point at the
      # plugin-specific INI file.
      should contain_file('/etc/quantum/plugin.ini').with(
        :ensure  => 'link',
        :target  => '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini',
        :require => 'Package[quantum-plugin-ovs]'
      )
    end

    it_configures 'quantum ovs plugin'
  end
end
enovance/openstack-quantum-puppet
spec/classes/quantum_plugins_ovs_spec.rb
Ruby
apache-2.0
3,147
// Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resolve import ( "context" "fmt" "reflect" "github.com/google/gapid/core/data/dictionary" "github.com/google/gapid/core/image" "github.com/google/gapid/core/math/sint" "github.com/google/gapid/core/os/device" "github.com/google/gapid/core/os/device/bind" "github.com/google/gapid/gapis/capture" "github.com/google/gapid/gapis/database" "github.com/google/gapid/gapis/messages" "github.com/google/gapid/gapis/service" "github.com/google/gapid/gapis/service/box" "github.com/google/gapid/gapis/service/path" ) // Capture resolves and returns the capture from the path p. func Capture(ctx context.Context, p *path.Capture) (*service.Capture, error) { c, err := capture.ResolveFromPath(ctx, p) if err != nil { return nil, err } return c.Service(ctx, p), nil } // Device resolves and returns the device from the path p. func Device(ctx context.Context, p *path.Device) (*device.Instance, error) { device := bind.GetRegistry(ctx).Device(p.Id.ID()) if device == nil { return nil, &service.ErrDataUnavailable{Reason: messages.ErrUnknownDevice()} } return device.Instance(), nil } // ImageInfo resolves and returns the ImageInfo from the path p. 
func ImageInfo(ctx context.Context, p *path.ImageInfo) (*image.Info, error) { obj, err := database.Resolve(ctx, p.Id.ID()) if err != nil { return nil, err } ii, ok := obj.(*image.Info) if !ok { return nil, fmt.Errorf("Path %s gave %T, expected *image.Info", p, obj) } return ii, err } // Blob resolves and returns the byte slice from the path p. func Blob(ctx context.Context, p *path.Blob) ([]byte, error) { obj, err := database.Resolve(ctx, p.Id.ID()) if err != nil { return nil, err } bytes, ok := obj.([]byte) if !ok { return nil, fmt.Errorf("Path %s gave %T, expected []byte", p, obj) } return bytes, nil } // Field resolves and returns the field from the path p. func Field(ctx context.Context, p *path.Field) (interface{}, error) { obj, err := ResolveInternal(ctx, p.Parent()) if err != nil { return nil, err } v, err := field(ctx, reflect.ValueOf(obj), p.Name, p) if err != nil { return nil, err } return v.Interface(), nil } func field(ctx context.Context, s reflect.Value, name string, p path.Node) (reflect.Value, error) { for { switch s.Kind() { case reflect.Struct: f := s.FieldByName(name) if !f.IsValid() { return reflect.Value{}, &service.ErrInvalidPath{ Reason: messages.ErrFieldDoesNotExist(typename(s.Type()), name), Path: p.Path(), } } return f, nil case reflect.Interface, reflect.Ptr: if s.IsNil() { return reflect.Value{}, &service.ErrInvalidPath{ Reason: messages.ErrNilPointerDereference(), Path: p.Path(), } } s = s.Elem() default: return reflect.Value{}, &service.ErrInvalidPath{ Reason: messages.ErrFieldDoesNotExist(typename(s.Type()), name), Path: p.Path(), } } } } // ArrayIndex resolves and returns the array or slice element from the path p. 
func ArrayIndex(ctx context.Context, p *path.ArrayIndex) (interface{}, error) { obj, err := ResolveInternal(ctx, p.Parent()) if err != nil { return nil, err } a := reflect.ValueOf(obj) switch { case box.IsMemorySlice(a.Type()): ml, err := memoryLayout(ctx, p) if err != nil { return nil, err } slice := box.AsMemorySlice(a) if count := slice.Count(); p.Index >= count { return nil, errPathOOB(p.Index, "Index", 0, count-1, p) } return slice.IIndex(p.Index, ml), nil default: switch a.Kind() { case reflect.Array, reflect.Slice, reflect.String: if count := uint64(a.Len()); p.Index >= count { return nil, errPathOOB(p.Index, "Index", 0, count-1, p) } return a.Index(int(p.Index)).Interface(), nil default: return nil, &service.ErrInvalidPath{ Reason: messages.ErrTypeNotArrayIndexable(typename(a.Type())), Path: p.Path(), } } } } // Slice resolves and returns the subslice from the path p. func Slice(ctx context.Context, p *path.Slice) (interface{}, error) { obj, err := ResolveInternal(ctx, p.Parent()) if err != nil { return nil, err } a := reflect.ValueOf(obj) switch { case box.IsMemorySlice(a.Type()): ml, err := memoryLayout(ctx, p) if err != nil { return nil, err } slice := box.AsMemorySlice(a) if p.Start >= slice.Count() || p.End > slice.Count() { return nil, errPathSliceOOB(p.Start, p.End, slice.Count(), p) } return slice.ISlice(p.Start, p.End, ml), nil default: switch a.Kind() { case reflect.Array, reflect.Slice, reflect.String: if int(p.Start) >= a.Len() || int(p.End) > a.Len() { return nil, errPathSliceOOB(p.Start, p.End, uint64(a.Len()), p) } return a.Slice(int(p.Start), int(p.End)).Interface(), nil default: return nil, &service.ErrInvalidPath{ Reason: messages.ErrTypeNotSliceable(typename(a.Type())), Path: p.Path(), } } } } // MapIndex resolves and returns the map value from the path p. 
func MapIndex(ctx context.Context, p *path.MapIndex) (interface{}, error) { obj, err := ResolveInternal(ctx, p.Parent()) if err != nil { return nil, err } d := dictionary.From(obj) if d == nil { return nil, &service.ErrInvalidPath{ Reason: messages.ErrTypeNotMapIndexable(typename(reflect.TypeOf(obj))), Path: p.Path(), } } key, ok := convert(reflect.ValueOf(p.KeyValue()), d.KeyTy()) if !ok { return nil, &service.ErrInvalidPath{ Reason: messages.ErrIncorrectMapKeyType( typename(reflect.TypeOf(p.KeyValue())), // got typename(d.KeyTy())), // expected Path: p.Path(), } } val, ok := d.Lookup(key.Interface()) if !ok { return nil, &service.ErrInvalidPath{ Reason: messages.ErrMapKeyDoesNotExist(key.Interface()), Path: p.Path(), } } return val, nil } // memoryLayout resolves the memory layout for the capture of the given path. func memoryLayout(ctx context.Context, p path.Node) (*device.MemoryLayout, error) { cp := path.FindCapture(p) if cp == nil { return nil, errPathNoCapture(p) } c, err := capture.ResolveFromPath(ctx, cp) if err != nil { return nil, err } return c.Header.Abi.MemoryLayout, nil } // ResolveService resolves and returns the object, value or memory at the path p, // converting the final result to the service representation. func ResolveService(ctx context.Context, p path.Node) (interface{}, error) { v, err := ResolveInternal(ctx, p) if err != nil { return nil, err } return internalToService(v) } // ResolveInternal resolves and returns the object, value or memory at the path // p without converting the potentially internal result to a service // representation. 
func ResolveInternal(ctx context.Context, p path.Node) (interface{}, error) {
	// Pure type-dispatch: each concrete path node type has a dedicated
	// resolver in this package.
	switch p := p.(type) {
	case *path.ArrayIndex:
		return ArrayIndex(ctx, p)
	case *path.As:
		return As(ctx, p)
	case *path.Blob:
		return Blob(ctx, p)
	case *path.Capture:
		return Capture(ctx, p)
	case *path.Command:
		return Cmd(ctx, p)
	case *path.Commands:
		return Commands(ctx, p)
	case *path.CommandTree:
		return CommandTree(ctx, p)
	case *path.CommandTreeNode:
		return CommandTreeNode(ctx, p)
	case *path.CommandTreeNodeForCommand:
		return CommandTreeNodeForCommand(ctx, p)
	case *path.ConstantSet:
		return ConstantSet(ctx, p)
	case *path.Context:
		return Context(ctx, p)
	case *path.Contexts:
		return Contexts(ctx, p)
	case *path.Device:
		return Device(ctx, p)
	case *path.Events:
		return Events(ctx, p)
	case *path.FramebufferObservation:
		return FramebufferObservation(ctx, p)
	case *path.Field:
		return Field(ctx, p)
	case *path.GlobalState:
		return GlobalState(ctx, p)
	case *path.ImageInfo:
		return ImageInfo(ctx, p)
	case *path.MapIndex:
		return MapIndex(ctx, p)
	case *path.Memory:
		return Memory(ctx, p)
	case *path.Mesh:
		return Mesh(ctx, p)
	case *path.Parameter:
		return Parameter(ctx, p)
	case *path.Report:
		return Report(ctx, p)
	case *path.ResourceData:
		return ResourceData(ctx, p)
	case *path.Resources:
		// Resources takes the capture directly rather than the path node.
		return Resources(ctx, p.Capture)
	case *path.Result:
		return Result(ctx, p)
	case *path.Slice:
		return Slice(ctx, p)
	case *path.State:
		return State(ctx, p)
	case *path.StateTree:
		return StateTree(ctx, p)
	case *path.StateTreeNode:
		return StateTreeNode(ctx, p)
	case *path.StateTreeNodeForPath:
		return StateTreeNodeForPath(ctx, p)
	case *path.Thumbnail:
		return Thumbnail(ctx, p)
	default:
		return nil, fmt.Errorf("Unknown path type %T", p)
	}
}

// typename returns a human-readable name for the type t, used in error
// messages. Unnamed pointer types are rendered as "ptr<elem>".
func typename(t reflect.Type) string {
	if s := t.Name(); len(s) > 0 {
		return s
	}
	switch t.Kind() {
	case reflect.Ptr:
		return "ptr<" + typename(t.Elem()) + ">"
		// TODO: Format other composite types?
	default:
		return t.String()
	}
}

// convert attempts to convert val to the type ty, returning the converted
// value and true on success, or the original value and false on failure.
// An invalid (zero) val converts to the zero value of ty.
func convert(val reflect.Value, ty reflect.Type) (reflect.Value, bool) {
	if !val.IsValid() {
		return reflect.Zero(ty), true
	}
	valTy := val.Type()
	if valTy == ty {
		return val, true
	}
	if valTy.ConvertibleTo(ty) {
		return val.Convert(ty), true
	}
	// slice -> array
	if valTy.Kind() == reflect.Slice && ty.Kind() == reflect.Array {
		if valTy.Elem().ConvertibleTo(ty.Elem()) {
			// Copy element-wise, truncating to the shorter of the two lengths.
			c := sint.Min(val.Len(), ty.Len())
			out := reflect.New(ty).Elem()
			for i := 0; i < c; i++ {
				v, ok := convert(val.Index(i), ty.Elem())
				if !ok {
					return val, false
				}
				out.Index(i).Set(v)
			}
			return out, true
		}
	}
	return val, false
}
dsrbecky/gapid
gapis/resolve/resolve.go
GO
apache-2.0
9,899
package nl.tno.hexabus.api; import org.flexiblepower.ral.ResourceControlParameters; public interface HexabusControlParameters extends ResourceControlParameters { boolean isSwitchedOn(); }
flexiblepower/fpai-apps
flexiblepower.driver.smartplug.hexabus/src/nl/tno/hexabus/api/HexabusControlParameters.java
Java
apache-2.0
194
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * IndexCreationInternalsTest.java
 * JUnit based test
 *
 * Created on February 22, 2005, 11:24 AM
 */
package com.gemstone.gemfire.cache.query.internal;

import java.util.Iterator;

import junit.framework.TestCase;

import org.junit.experimental.categories.Category;

import com.gemstone.gemfire.cache.query.SelectResults;
import com.gemstone.gemfire.cache.query.data.Portfolio;
import com.gemstone.gemfire.cache.query.internal.types.StructTypeImpl;
import com.gemstone.gemfire.cache.query.internal.types.TypeUtils;
import com.gemstone.gemfire.cache.query.types.ObjectType;
import com.gemstone.gemfire.test.junit.categories.UnitTest;

/**
 * Unit test for {@link StructSet}: verifies that retainAll leaves the
 * receiver untouched when both sets are equal, and that
 * {@code QueryUtils.intersection} returns the expected number of rows.
 * JUnit 3 style (extends TestCase; methods discovered by the "test" prefix).
 *
 * @author ericz
 */
@Category(UnitTest.class)
public class StructSetJUnitTest extends TestCase {

  public StructSetJUnitTest(String testName) {
    super(testName);
  }

  public void testIntersectionAndRetainAll() {
    // Two-column struct type: the portfolio and one of its positions.
    String names[] = {"p", "pos"};
    ObjectType types[] = {TypeUtils.OBJECT_TYPE, TypeUtils.OBJECT_TYPE};
    StructTypeImpl sType = new StructTypeImpl(names, types);

    // Build two StructSets with identical contents from the same portfolio.
    StructSet set1 = new StructSet(sType);
    Portfolio ptf = new Portfolio(0);
    Iterator pIter = ptf.positions.values().iterator();
    while (pIter.hasNext()) {
      Object arr[] = {ptf, pIter.next()};
      set1.addFieldValues(arr);
    }
    StructSet set2 = new StructSet(sType);
    pIter = ptf.positions.values().iterator();
    while (pIter.hasNext()) {
      Object arr[] = {ptf, pIter.next()};
      set2.addFieldValues(arr);
    }
    // Portfolio(0) apparently yields two positions — TODO confirm against
    // the Portfolio test-data class.
    assertEquals(2, set1.size());
    assertEquals(2, set2.size());

    // tests that retainAll does not modify set1
    // (retainAll returns false when the receiver was left unchanged)
    assertTrue(!set1.retainAll(set2));
    assertEquals(2, set1.size());
    assertEquals(2, set2.size());

    // Intersection of two equal sets has the same cardinality.
    SelectResults sr = QueryUtils.intersection(set1, set2, null);
    assertEquals(2, sr.size());
  }
}
robertgeiger/incubator-geode
gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/StructSetJUnitTest.java
Java
apache-2.0
2,624
package nkp.pspValidator.shared.engine.exceptions; /** * Created by Martin Řehánek on 11.11.16. */ public class SizeDifferenceException extends Exception { public SizeDifferenceException(String message) { super(message); } }
rzeh4n/psp-validator
sharedModule/src/main/java/nkp/pspValidator/shared/engine/exceptions/SizeDifferenceException.java
Java
apache-2.0
246
/* * Copyright 2015-2016 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.hawkular.alerts.actions.irc; import static org.junit.Assert.assertTrue; import org.schwering.irc.lib.IRCConfig; import org.schwering.irc.lib.IRCConfigBuilder; import org.schwering.irc.lib.IRCConnection; import org.schwering.irc.lib.IRCConnectionFactory; import org.schwering.irc.lib.IRCTrafficLogger; /** * @author Jay Shaughnessy * @author Lucas Ponce */ public class IrcPluginTest { public static String IRC_HOST = "irc.freenode.net"; public static int IRC_PORT = 6667; public static String IRC_USER = "hwk-alerts-bot"; public static String IRC_PASSWORD = "H4wk0l43"; public void testConnection() throws Exception { IRCConfig config = IRCConfigBuilder.newBuilder() .host(IRC_HOST) .port(IRC_PORT) .nick(IRC_USER) .username(IRC_USER) .password(IRC_PASSWORD) .realname(IRC_USER) .trafficLogger(IRCTrafficLogger.SYSTEM_OUT) .build(); IRCConnection conn = IRCConnectionFactory.newConnection(config); conn.connect(); assertTrue(conn.isConnected()); conn.doJoin("#hawkular-alerts"); conn.doPrivmsg("#hawkular-alerts", "Hello World!"); /* Wait to receive some response */ for (int i = 0; i < 15; i++) { Thread.sleep(1000); } conn.doQuit(); conn.close(); } }
jpkrohling/hawkular-alerts
hawkular-alerts-actions/hawkular-alerts-actions-plugins/hawkular-alerts-actions-irc/hawkular-alerts-actions-irc-plugin/src/test/java/org/hawkular/alerts/actions/irc/IrcPluginTest.java
Java
apache-2.0
2,113
package bamboo.util;

import org.junit.Test;

import static org.junit.Assert.assertEquals;

/**
 * Unit tests for {@link Urls}.
 */
public class UrlsTest {

    @Test
    public void testRemoveScheme() {
        String bareHost = "www.nla.gov.au";
        // Stripping an explicit http:// scheme leaves just the host.
        assertEquals(bareHost, Urls.removeScheme("http://" + bareHost));
        // A URL without a scheme is returned unchanged.
        assertEquals(bareHost, Urls.removeScheme(bareHost));
    }
}
greg-pendlebury/bamboo
ui/test/bamboo/util/UrlsTest.java
Java
apache-2.0
333
import datetime
import logging
import multiprocessing
import os
import shutil
from mimetypes import guess_type
from typing import Any, Dict, Iterable, List, Optional, Tuple

import orjson
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.utils.timezone import now as timezone_now
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier

from analytics.models import RealmCount, StreamCount, UserCount
from zerver.lib.actions import (
    UserMessageLite,
    bulk_insert_ums,
    do_change_avatar_fields,
    do_change_realm_plan_type,
)
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.bulk_create import bulk_create_users, bulk_set_users_or_streams_recipient_fields
from zerver.lib.export import DATE_FIELDS, Field, Path, Record, TableData, TableName
from zerver.lib.markdown import markdown_convert
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import get_last_message_id
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.streams import render_stream_description
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.upload import BadImageError, get_bucket, sanitize_name, upload_backend
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.models import (
    AlertWord,
    Attachment,
    BotConfigData,
    BotStorageData,
    Client,
    CustomProfileField,
    CustomProfileFieldValue,
    DefaultStream,
    GroupGroupMembership,
    Huddle,
    Message,
    MutedUser,
    Reaction,
    Realm,
    RealmAuditLog,
    RealmDomain,
    RealmEmoji,
    RealmFilter,
    RealmPlayground,
    RealmUserDefault,
    Recipient,
    Service,
    Stream,
    Subscription,
    UserActivity,
    UserActivityInterval,
    UserGroup,
    UserGroupMembership,
    UserHotspot,
    UserMessage,
    UserPresence,
    UserProfile,
    UserStatus,
    UserTopic,
    get_huddle_hash,
    get_realm,
    get_system_bot,
    get_user_profile_by_id,
)

# Per-realm tables that are imported generically: (db table name, model
# class, ID_MAP key).
realm_tables: List[Tuple[TableName, Any, str]] = [
    ("zerver_defaultstream", DefaultStream, "defaultstream"),
    ("zerver_realmemoji", RealmEmoji, "realmemoji"),
    ("zerver_realmdomain", RealmDomain, "realmdomain"),
    ("zerver_realmfilter", RealmFilter, "realmfilter"),
    ("zerver_realmplayground", RealmPlayground, "realmplayground"),
]


# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
ID_MAP: Dict[str, Dict[int, int]] = {
    "alertword": {},
    "client": {},
    "user_profile": {},
    "huddle": {},
    "realm": {},
    "stream": {},
    "recipient": {},
    "subscription": {},
    "defaultstream": {},
    "reaction": {},
    "realmemoji": {},
    "realmdomain": {},
    "realmfilter": {},
    "realmplayground": {},
    "message": {},
    "user_presence": {},
    "userstatus": {},
    "useractivity": {},
    "useractivityinterval": {},
    "usermessage": {},
    "customprofilefield": {},
    "customprofilefieldvalue": {},
    "attachment": {},
    "realmauditlog": {},
    "recipient_to_huddle_map": {},
    "userhotspot": {},
    "usertopic": {},
    "muteduser": {},
    "service": {},
    "usergroup": {},
    "usergroupmembership": {},
    "groupgroupmembership": {},
    "botstoragedata": {},
    "botconfigdata": {},
    "analytics_realmcount": {},
    "analytics_streamcount": {},
    "analytics_usercount": {},
    "realmuserdefault": {},
}

# Maps used where an old id corresponds to a LIST of new ids (currently
# only for recovering the user lists of huddles from subscriptions).
id_map_to_list: Dict[str, Dict[int, List[int]]] = {
    "huddle_to_user_list": {},
}

# old upload path -> new upload path; filled in by import_uploads and
# consumed by fix_upload_links.
path_maps: Dict[str, Dict[str, str]] = {
    "attachment_path": {},
}


def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
    """Record that `old_id` in the export maps to `new_id` in this
    database for the given ID_MAP key, failing loudly for unknown keys."""
    if table not in ID_MAP:
        raise Exception(
            f"""
            Table {table} is not initialized in ID_MAP, which could
            mean that we have not thought through circular
            dependencies.
            """
        )
    ID_MAP[table][old_id] = new_id


def fix_datetime_fields(data: TableData, table: TableName) -> None:
    """Convert the exported UNIX timestamps in a table's DATE_FIELDS
    columns back into timezone-aware (UTC) datetime objects."""
    for item in data[table]:
        for field_name in DATE_FIELDS[table]:
            if item[field_name] is not None:
                item[field_name] = datetime.datetime.fromtimestamp(
                    item[field_name], tz=datetime.timezone.utc
                )


def fix_upload_links(data: TableData, message_table: TableName) -> None:
    """
    Because the URLs for uploaded files encode the realm ID of the
    organization being imported (which is only determined at import
    time), we need to rewrite the URLs of links to uploaded files
    during the import process.
    """
    for message in data[message_table]:
        if message["has_attachment"] is True:
            for key, value in path_maps["attachment_path"].items():
                if key in message["content"]:
                    message["content"] = message["content"].replace(key, value)
                    if message["rendered_content"]:
                        message["rendered_content"] = message["rendered_content"].replace(
                            key, value
                        )


def create_subscription_events(data: TableData, realm_id: int) -> None:
    """
    When the export data doesn't contain the table `zerver_realmauditlog`,
    this function creates RealmAuditLog objects for `subscription_created`
    type event for all the existing Stream subscriptions.

    This is needed for all the export tools which do not include the
    table `zerver_realmauditlog` (Slack, Gitter, etc.) because the
    appropriate data about when a user was subscribed is not exported by
    the third-party service.
""" all_subscription_logs = [] event_last_message_id = get_last_message_id() event_time = timezone_now() recipient_id_to_stream_id = { d["id"]: d["type_id"] for d in data["zerver_recipient"] if d["type"] == Recipient.STREAM } for sub in data["zerver_subscription"]: recipient_id = sub["recipient_id"] stream_id = recipient_id_to_stream_id.get(recipient_id) if stream_id is None: continue user_id = sub["user_profile_id"] all_subscription_logs.append( RealmAuditLog( realm_id=realm_id, acting_user_id=user_id, modified_user_id=user_id, modified_stream_id=stream_id, event_last_message_id=event_last_message_id, event_time=event_time, event_type=RealmAuditLog.SUBSCRIPTION_CREATED, ) ) RealmAuditLog.objects.bulk_create(all_subscription_logs) def fix_service_tokens(data: TableData, table: TableName) -> None: """ The tokens in the services are created by 'generate_api_key'. As the tokens are unique, they should be re-created for the imports. """ for item in data[table]: item["token"] = generate_api_key() def process_huddle_hash(data: TableData, table: TableName) -> None: """ Build new huddle hashes with the updated ids of the users """ for huddle in data[table]: user_id_list = id_map_to_list["huddle_to_user_list"][huddle["id"]] huddle["huddle_hash"] = get_huddle_hash(user_id_list) def get_huddles_from_subscription(data: TableData, table: TableName) -> None: """ Extract the IDs of the user_profiles involved in a huddle from the subscription object This helps to generate a unique huddle hash from the updated user_profile ids """ id_map_to_list["huddle_to_user_list"] = { value: [] for value in ID_MAP["recipient_to_huddle_map"].values() } for subscription in data[table]: if subscription["recipient"] in ID_MAP["recipient_to_huddle_map"]: huddle_id = ID_MAP["recipient_to_huddle_map"][subscription["recipient"]] id_map_to_list["huddle_to_user_list"][huddle_id].append(subscription["user_profile_id"]) def fix_customprofilefield(data: TableData) -> None: """ In CustomProfileField with 
'field_type' like 'USER', the IDs need to be re-mapped. """ field_type_USER_id_list = [] for item in data["zerver_customprofilefield"]: if item["field_type"] == CustomProfileField.USER: field_type_USER_id_list.append(item["id"]) for item in data["zerver_customprofilefieldvalue"]: if item["field_id"] in field_type_USER_id_list: old_user_id_list = orjson.loads(item["value"]) new_id_list = re_map_foreign_keys_many_to_many_internal( table="zerver_customprofilefieldvalue", field_name="value", related_table="user_profile", old_id_list=old_user_id_list, ) item["value"] = orjson.dumps(new_id_list).decode() def fix_message_rendered_content( realm: Realm, sender_map: Dict[int, Record], messages: List[Record] ) -> None: """ This function sets the rendered_content of all the messages after the messages have been imported from a non-Zulip platform. """ for message in messages: if message["rendered_content"] is not None: # For Zulip->Zulip imports, we use the original rendered # Markdown; this avoids issues where e.g. a mention can no # longer render properly because a user has changed their # name. # # However, we still need to update the data-user-id and # similar values stored on mentions, stream mentions, and # similar syntax in the rendered HTML. soup = BeautifulSoup(message["rendered_content"], "html.parser") user_mentions = soup.findAll("span", {"class": "user-mention"}) if len(user_mentions) != 0: user_id_map = ID_MAP["user_profile"] for mention in user_mentions: if not mention.has_attr("data-user-id"): # Legacy mentions don't have a data-user-id # field; we should just import them # unmodified. 
continue if mention["data-user-id"] == "*": # No rewriting is required for wildcard mentions continue old_user_id = int(mention["data-user-id"]) if old_user_id in user_id_map: mention["data-user-id"] = str(user_id_map[old_user_id]) message["rendered_content"] = str(soup) stream_mentions = soup.findAll("a", {"class": "stream"}) if len(stream_mentions) != 0: stream_id_map = ID_MAP["stream"] for mention in stream_mentions: old_stream_id = int(mention["data-stream-id"]) if old_stream_id in stream_id_map: mention["data-stream-id"] = str(stream_id_map[old_stream_id]) message["rendered_content"] = str(soup) user_group_mentions = soup.findAll("span", {"class": "user-group-mention"}) if len(user_group_mentions) != 0: user_group_id_map = ID_MAP["usergroup"] for mention in user_group_mentions: old_user_group_id = int(mention["data-user-group-id"]) if old_user_group_id in user_group_id_map: mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id]) message["rendered_content"] = str(soup) continue try: content = message["content"] sender_id = message["sender_id"] sender = sender_map[sender_id] sent_by_bot = sender["is_bot"] translate_emoticons = sender["translate_emoticons"] # We don't handle alert words on import from third-party # platforms, since they generally don't have an "alert # words" type feature, and notifications aren't important anyway. 
realm_alert_words_automaton = None rendered_content = markdown_convert( content=content, realm_alert_words_automaton=realm_alert_words_automaton, message_realm=realm, sent_by_bot=sent_by_bot, translate_emoticons=translate_emoticons, ).rendered_content message["rendered_content"] = rendered_content message["rendered_content_version"] = markdown_version except Exception: # This generally happens with two possible causes: # * rendering Markdown throwing an uncaught exception # * rendering Markdown failing with the exception being # caught in Markdown (which then returns None, causing the the # rendered_content assert above to fire). logging.warning( "Error in Markdown rendering for message ID %s; continuing", message["id"] ) def current_table_ids(data: TableData, table: TableName) -> List[int]: """ Returns the ids present in the current table """ id_list = [] for item in data[table]: id_list.append(item["id"]) return id_list def idseq(model_class: Any) -> str: if model_class == RealmDomain: return "zerver_realmalias_id_seq" elif model_class == BotStorageData: return "zerver_botuserstatedata_id_seq" elif model_class == BotConfigData: return "zerver_botuserconfigdata_id_seq" elif model_class == UserTopic: # The database table for this model was renamed from `mutedtopic` to # `usertopic`, but the name of the sequence object remained the same. return "zerver_mutedtopic_id_seq" return f"{model_class._meta.db_table}_id_seq" def allocate_ids(model_class: Any, count: int) -> List[int]: """ Increases the sequence number for a given table by the amount of objects being imported into that table. Hence, this gives a reserved range of IDs to import the converted Slack objects into the tables. 
""" conn = connection.cursor() sequence = idseq(model_class) conn.execute("select nextval(%s) from generate_series(1, %s)", [sequence, count]) query = conn.fetchall() # Each element in the result is a tuple like (5,) conn.close() # convert List[Tuple[int]] to List[int] return [item[0] for item in query] def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None: """ When Django gives us dict objects via model_to_dict, the foreign key fields are `foo`, but we want `foo_id` for the bulk insert. This function handles the simple case where we simply rename the fields. For cases where we need to munge ids in the database, see re_map_foreign_keys. """ for item in data[table]: item[field_name + "_id"] = item[field_name] del item[field_name] def re_map_foreign_keys( data: TableData, table: TableName, field_name: Field, related_table: TableName, verbose: bool = False, id_field: bool = False, recipient_field: bool = False, ) -> None: """ This is a wrapper function for all the realm data tables and only avatar and attachment records need to be passed through the internal function because of the difference in data format (TableData corresponding to realm data tables and List[Record] corresponding to the avatar and attachment records) """ # See comments in bulk_import_user_message_data. assert "usermessage" not in related_table re_map_foreign_keys_internal( data[table], table, field_name, related_table, verbose, id_field, recipient_field, ) def re_map_foreign_keys_internal( data_table: List[Record], table: TableName, field_name: Field, related_table: TableName, verbose: bool = False, id_field: bool = False, recipient_field: bool = False, ) -> None: """ We occasionally need to assign new ids to rows during the import/export process, to accommodate things like existing rows already being in tables. See bulk_import_client for more context. 
The tricky part is making sure that foreign key references are in sync with the new ids, and this fixer function does the re-mapping. (It also appends `_id` to the field.) """ lookup_table = ID_MAP[related_table] for item in data_table: old_id = item[field_name] if recipient_field: if related_table == "stream" and item["type"] == 2: pass elif related_table == "user_profile" and item["type"] == 1: pass elif related_table == "huddle" and item["type"] == 3: # save the recipient id with the huddle id, so that we can extract # the user_profile ids involved in a huddle with the help of the # subscription object # check function 'get_huddles_from_subscription' ID_MAP["recipient_to_huddle_map"][item["id"]] = lookup_table[old_id] else: continue old_id = item[field_name] if old_id in lookup_table: new_id = lookup_table[old_id] if verbose: logging.info( "Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id ) else: new_id = old_id if not id_field: item[field_name + "_id"] = new_id del item[field_name] else: item[field_name] = new_id def re_map_realm_emoji_codes(data: TableData, *, table_name: str) -> None: """ Some tables, including Reaction and UserStatus, contain a form of foreign key reference to the RealmEmoji table in the form of `str(realm_emoji.id)` when `reaction_type="realm_emoji"`. See the block comment for emoji_code in the AbstractEmoji definition for more details. """ realm_emoji_dct = {} for row in data["zerver_realmemoji"]: realm_emoji_dct[row["id"]] = row for row in data[table_name]: if row["reaction_type"] == Reaction.REALM_EMOJI: old_realm_emoji_id = int(row["emoji_code"]) # Fail hard here if we didn't map correctly here new_realm_emoji_id = ID_MAP["realmemoji"][old_realm_emoji_id] # This is a very important sanity check. realm_emoji_row = realm_emoji_dct[new_realm_emoji_id] assert realm_emoji_row["name"] == row["emoji_name"] # Now update emoji_code to the new id. 
row["emoji_code"] = str(new_realm_emoji_id) def re_map_foreign_keys_many_to_many( data: TableData, table: TableName, field_name: Field, related_table: TableName, verbose: bool = False, ) -> None: """ We need to assign new ids to rows during the import/export process. The tricky part is making sure that foreign key references are in sync with the new ids, and this wrapper function does the re-mapping only for ManyToMany fields. """ for item in data[table]: old_id_list = item[field_name] new_id_list = re_map_foreign_keys_many_to_many_internal( table, field_name, related_table, old_id_list, verbose ) item[field_name] = new_id_list del item[field_name] def re_map_foreign_keys_many_to_many_internal( table: TableName, field_name: Field, related_table: TableName, old_id_list: List[int], verbose: bool = False, ) -> List[int]: """ This is an internal function for tables with ManyToMany fields, which takes the old ID list of the ManyToMany relation and returns the new updated ID list. """ lookup_table = ID_MAP[related_table] new_id_list = [] for old_id in old_id_list: if old_id in lookup_table: new_id = lookup_table[old_id] if verbose: logging.info( "Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id ) else: new_id = old_id new_id_list.append(new_id) return new_id_list def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None: for item in data[table]: item[field_name] = item[field_name + "_mask"] del item[field_name + "_mask"] def fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None: """Used to fixup the authentication_methods bitfield to be a string""" for item in data[table]: values_as_bitstring = "".join("1" if field[1] else "0" for field in item[field_name]) values_as_int = int(values_as_bitstring, 2) item[field_name] = values_as_int def remove_denormalized_recipient_column_from_data(data: TableData) -> None: """ The recipient column shouldn't be imported, we'll set the correct 
values when Recipient table gets imported. """ for stream_dict in data["zerver_stream"]: if "recipient" in stream_dict: del stream_dict["recipient"] for user_profile_dict in data["zerver_userprofile"]: if "recipient" in user_profile_dict: del user_profile_dict["recipient"] for huddle_dict in data["zerver_huddle"]: if "recipient" in huddle_dict: del huddle_dict["recipient"] def get_db_table(model_class: Any) -> str: """E.g. (RealmDomain -> 'zerver_realmdomain')""" return model_class._meta.db_table def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None: table = get_db_table(model) # Important: remapping usermessage rows is # not only unnecessary, it's expensive and can cause # memory errors. We don't even use ids from ID_MAP. assert "usermessage" not in table old_id_list = current_table_ids(data, table) allocated_id_list = allocate_ids(model, len(data[table])) for item in range(len(data[table])): update_id_map(related_table, old_id_list[item], allocated_id_list[item]) re_map_foreign_keys(data, table, "id", related_table=related_table, id_field=True) def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None: model = UserMessage table = "zerver_usermessage" lst = data[table] # IMPORTANT NOTE: We do not use any primary id # data from either the import itself or ID_MAP. # We let the DB itself generate ids. Note that # no tables use user_message.id as a foreign key, # so we can safely avoid all re-mapping complexity. 
def process_batch(items: List[Dict[str, Any]]) -> None: ums = [ UserMessageLite( user_profile_id=item["user_profile_id"], message_id=item["message_id"], flags=item["flags"], ) for item in items ] bulk_insert_ums(ums) chunk_size = 10000 process_list_in_batches( lst=lst, chunk_size=chunk_size, process_batch=process_batch, ) logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id) def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str] = None) -> None: table = get_db_table(model) # TODO, deprecate dump_file_id model.objects.bulk_create(model(**item) for item in data[table]) if dump_file_id is None: logging.info("Successfully imported %s from %s.", model, table) else: logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id) # Client is a table shared by multiple realms, so in order to # correctly import multiple realms into the same server, we need to # check if a Client object already exists, and so we need to support # remap all Client IDs to the values in the new DB. def bulk_import_client(data: TableData, model: Any, table: TableName) -> None: for item in data[table]: try: client = Client.objects.get(name=item["name"]) except Client.DoesNotExist: client = Client.objects.create(name=item["name"]) update_id_map(table="client", old_id=item["id"], new_id=client.id) def fix_subscriptions_is_user_active_column( data: TableData, user_profiles: List[UserProfile] ) -> None: table = get_db_table(Subscription) user_id_to_active_status = {user.id: user.is_active for user in user_profiles} for sub in data[table]: sub["is_user_active"] = user_id_to_active_status[sub["user_profile_id"]] def process_avatars(record: Dict[str, Any]) -> None: # We need to re-import upload_backend here, because in the # import-export unit tests, the Zulip settings are overridden for # specific tests to control the choice of upload backend, and this # reimport ensures that we use the right choice for the current # test. 
Outside the test suite, settings never change after the # server is started, so this import will have no effect in production. from zerver.lib.upload import upload_backend if record["s3_path"].endswith(".original"): user_profile = get_user_profile_by_id(record["user_profile_id"]) if settings.LOCAL_UPLOADS_DIR is not None: avatar_path = user_avatar_path_from_ids(user_profile.id, record["realm_id"]) medium_file_path = ( os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path) + "-medium.png" ) if os.path.exists(medium_file_path): # We remove the image here primarily to deal with # issues when running the import script multiple # times in development (where one might reuse the # same realm ID from a previous iteration). os.remove(medium_file_path) try: upload_backend.ensure_avatar_image(user_profile=user_profile, is_medium=True) if record.get("importer_should_thumbnail"): upload_backend.ensure_avatar_image(user_profile=user_profile) except BadImageError: logging.warning( "Could not thumbnail avatar image for user %s; ignoring", user_profile.id, ) # Delete the record of the avatar to avoid 404s. 
            do_change_avatar_fields(
                user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=None
            )


def import_uploads(
    realm: Realm,
    import_dir: Path,
    processes: int,
    processing_avatars: bool = False,
    processing_emojis: bool = False,
    processing_realm_icons: bool = False,
) -> None:
    """Copy one category of exported files (uploads, avatars, emoji, or
    realm icons/logos, selected by the processing_* flags) from
    `import_dir` into the active storage backend (local disk or S3),
    remapping realm/user ids via ID_MAP as it goes.  `processes` controls
    parallelism for the avatar-thumbnailing pass only."""
    if processing_avatars and processing_emojis:
        raise AssertionError("Cannot import avatars and emojis at the same time!")
    if processing_avatars:
        logging.info("Importing avatars")
    elif processing_emojis:
        logging.info("Importing emojis")
    elif processing_realm_icons:
        logging.info("Importing realm icons and logos")
    else:
        logging.info("Importing uploaded files")

    records_filename = os.path.join(import_dir, "records.json")
    with open(records_filename, "rb") as records_file:
        records: List[Dict[str, Any]] = orjson.loads(records_file.read())
    timestamp = datetime_to_timestamp(timezone_now())

    re_map_foreign_keys_internal(
        records, "records", "realm_id", related_table="realm", id_field=True
    )
    if not processing_emojis and not processing_realm_icons:
        re_map_foreign_keys_internal(
            records, "records", "user_profile_id", related_table="user_profile", id_field=True
        )

    s3_uploads = settings.LOCAL_UPLOADS_DIR is None

    if s3_uploads:
        if processing_avatars or processing_emojis or processing_realm_icons:
            bucket_name = settings.S3_AVATAR_BUCKET
        else:
            bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
        bucket = get_bucket(bucket_name)

    count = 0
    for record in records:
        count += 1
        if count % 1000 == 0:
            logging.info("Processed %s/%s uploads", count, len(records))

        if processing_avatars:
            # For avatars, we need to rehash the user ID with the
            # new server's avatar salt
            relative_path = user_avatar_path_from_ids(record["user_profile_id"], record["realm_id"])
            if record["s3_path"].endswith(".original"):
                relative_path += ".original"
            else:
                # TODO: This really should be unconditional.  However,
                # until we fix the S3 upload backend to use the .png
                # path suffix for its normal avatar URLs, we need to
                # only do this for the LOCAL_UPLOADS_DIR backend.
                if not s3_uploads:
                    relative_path += ".png"
        elif processing_emojis:
            # For emojis we follow the function 'upload_emoji_image'
            relative_path = RealmEmoji.PATH_ID_TEMPLATE.format(
                realm_id=record["realm_id"], emoji_file_name=record["file_name"]
            )
            record["last_modified"] = timestamp
        elif processing_realm_icons:
            icon_name = os.path.basename(record["path"])
            relative_path = os.path.join(str(record["realm_id"]), "realm", icon_name)
            record["last_modified"] = timestamp
        else:
            # This relative_path is basically the new location of the file,
            # which will later be copied from its original location as
            # specified in record["s3_path"].
            relative_path = upload_backend.generate_message_upload_path(
                str(record["realm_id"]), sanitize_name(os.path.basename(record["path"]))
            )
            # Remember old->new path so message bodies can be rewritten
            # later by fix_upload_links.
            path_maps["attachment_path"][record["s3_path"]] = relative_path

        if s3_uploads:
            key = bucket.Object(relative_path)
            metadata = {}
            if processing_emojis and "user_profile_id" not in record:
                # Exported custom emoji from tools like Slack don't have
                # the data for what user uploaded them in `user_profile_id`.
                pass
            elif processing_realm_icons and "user_profile_id" not in record:
                # Exported realm icons and logos from local export don't have
                # the value of user_profile_id in the associated record.
                pass
            else:
                user_profile_id = int(record["user_profile_id"])
                # Support email gateway bot and other cross-realm messages
                if user_profile_id in ID_MAP["user_profile"]:
                    logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
                    user_profile_id = ID_MAP["user_profile"][user_profile_id]
                user_profile = get_user_profile_by_id(user_profile_id)
                metadata["user_profile_id"] = str(user_profile.id)

            if "last_modified" in record:
                metadata["orig_last_modified"] = str(record["last_modified"])
            metadata["realm_id"] = str(record["realm_id"])

            # Zulip exports will always have a content-type, but third-party
            # exports might not.
            content_type = record.get("content_type")
            if content_type is None:
                content_type = guess_type(record["s3_path"])[0]
                if content_type is None:
                    # This is the default for unknown data.  Note that
                    # for `.original` files, this is the value we'll
                    # set; that is OK, because those are never served
                    # directly anyway.
                    content_type = "application/octet-stream"

            key.upload_file(
                Filename=os.path.join(import_dir, record["path"]),
                ExtraArgs={"ContentType": content_type, "Metadata": metadata},
            )
        else:
            assert settings.LOCAL_UPLOADS_DIR is not None
            if processing_avatars or processing_emojis or processing_realm_icons:
                file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", relative_path)
            else:
                file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", relative_path)
            orig_file_path = os.path.join(import_dir, record["path"])
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            shutil.copy(orig_file_path, file_path)

    if processing_avatars:
        # Ensure that we have medium-size avatar images for every
        # avatar.  TODO: This implementation is hacky, both in that it
        # does get_user_profile_by_id for each user, and in that it
        # might be better to require the export to just have these.
        if processes == 1:
            for record in records:
                process_avatars(record)
        else:
            # Close inherited DB/cache connections before forking so
            # each worker opens its own.
            connection.close()
            cache._cache.disconnect_all()
            with multiprocessing.Pool(processes) as p:
                for out in p.imap_unordered(process_avatars, records):
                    pass


# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly.  Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and foreign keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Realm:
    logging.info("Importing realm dump %s", import_dir)
    if not os.path.exists(import_dir):
        raise Exception("Missing import directory!")

    realm_data_filename = os.path.join(import_dir, "realm.json")
    if not os.path.exists(realm_data_filename):
        raise Exception("Missing realm.json file!")

    if not server_initialized():
        create_internal_realm()

    logging.info("Importing realm data from %s", realm_data_filename)
    with open(realm_data_filename, "rb") as f:
        data = orjson.loads(f.read())
    remove_denormalized_recipient_column_from_data(data)

    sort_by_date = data.get("sort_by_date", False)

    bulk_import_client(data, Client, "zerver_client")

    # We don't import the Stream model yet, since it depends on Realm,
    # which isn't imported yet.  But we need the Stream model IDs for
    # notifications_stream.
update_model_ids(Stream, data, "stream") re_map_foreign_keys(data, "zerver_realm", "notifications_stream", related_table="stream") re_map_foreign_keys(data, "zerver_realm", "signup_notifications_stream", related_table="stream") fix_datetime_fields(data, "zerver_realm") # Fix realm subdomain information data["zerver_realm"][0]["string_id"] = subdomain data["zerver_realm"][0]["name"] = subdomain fix_realm_authentication_bitfield(data, "zerver_realm", "authentication_methods") update_model_ids(Realm, data, "realm") realm = Realm(**data["zerver_realm"][0]) if realm.notifications_stream_id is not None: notifications_stream_id: Optional[int] = int(realm.notifications_stream_id) else: notifications_stream_id = None realm.notifications_stream_id = None if realm.signup_notifications_stream_id is not None: signup_notifications_stream_id: Optional[int] = int(realm.signup_notifications_stream_id) else: signup_notifications_stream_id = None realm.signup_notifications_stream_id = None realm.save() # Email tokens will automatically be randomly generated when the # Stream objects are created by Django. 
fix_datetime_fields(data, "zerver_stream") re_map_foreign_keys(data, "zerver_stream", "realm", related_table="realm") # Handle rendering of stream descriptions for import from non-Zulip for stream in data["zerver_stream"]: stream["rendered_description"] = render_stream_description(stream["description"]) bulk_import_model(data, Stream) realm.notifications_stream_id = notifications_stream_id realm.signup_notifications_stream_id = signup_notifications_stream_id realm.save() # Remap the user IDs for notification_bot and friends to their # appropriate IDs on this server internal_realm = get_realm(settings.SYSTEM_BOT_REALM) for item in data["zerver_userprofile_crossrealm"]: logging.info( "Adding to ID map: %s %s", item["id"], get_system_bot(item["email"], internal_realm.id).id, ) new_user_id = get_system_bot(item["email"], internal_realm.id).id update_id_map(table="user_profile", old_id=item["id"], new_id=new_user_id) new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id update_id_map(table="recipient", old_id=item["recipient_id"], new_id=new_recipient_id) # Merge in zerver_userprofile_mirrordummy data["zerver_userprofile"] = data["zerver_userprofile"] + data["zerver_userprofile_mirrordummy"] del data["zerver_userprofile_mirrordummy"] data["zerver_userprofile"].sort(key=lambda r: r["id"]) # To remap foreign key for UserProfile.last_active_message_id update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date) fix_datetime_fields(data, "zerver_userprofile") update_model_ids(UserProfile, data, "user_profile") re_map_foreign_keys(data, "zerver_userprofile", "realm", related_table="realm") re_map_foreign_keys(data, "zerver_userprofile", "bot_owner", related_table="user_profile") re_map_foreign_keys( data, "zerver_userprofile", "default_sending_stream", related_table="stream" ) re_map_foreign_keys( data, "zerver_userprofile", "default_events_register_stream", related_table="stream" ) re_map_foreign_keys( data, 
"zerver_userprofile", "last_active_message_id", related_table="message", id_field=True ) for user_profile_dict in data["zerver_userprofile"]: user_profile_dict["password"] = None user_profile_dict["api_key"] = generate_api_key() # Since Zulip doesn't use these permissions, drop them del user_profile_dict["user_permissions"] del user_profile_dict["groups"] # The short_name field is obsolete in Zulip, but it's # convenient for third party exports to populate it. if "short_name" in user_profile_dict: del user_profile_dict["short_name"] user_profiles = [UserProfile(**item) for item in data["zerver_userprofile"]] for user_profile in user_profiles: user_profile.set_unusable_password() UserProfile.objects.bulk_create(user_profiles) re_map_foreign_keys(data, "zerver_defaultstream", "stream", related_table="stream") re_map_foreign_keys(data, "zerver_realmemoji", "author", related_table="user_profile") for (table, model, related_table) in realm_tables: re_map_foreign_keys(data, table, "realm", related_table="realm") update_model_ids(model, data, related_table) bulk_import_model(data, model) # Ensure RealmEmoji get the .author set to a reasonable default, if the value # wasn't provided in the import data. first_user_profile = ( UserProfile.objects.filter(realm=realm, is_active=True, role=UserProfile.ROLE_REALM_OWNER) .order_by("id") .first() ) for realm_emoji in RealmEmoji.objects.filter(realm=realm): if realm_emoji.author_id is None: realm_emoji.author_id = first_user_profile.id realm_emoji.save(update_fields=["author_id"]) if "zerver_huddle" in data: update_model_ids(Huddle, data, "huddle") # We don't import Huddle yet, since we don't have the data to # compute huddle hashes until we've imported some of the # tables below. # TODO: double-check this. 
re_map_foreign_keys( data, "zerver_recipient", "type_id", related_table="stream", recipient_field=True, id_field=True, ) re_map_foreign_keys( data, "zerver_recipient", "type_id", related_table="user_profile", recipient_field=True, id_field=True, ) re_map_foreign_keys( data, "zerver_recipient", "type_id", related_table="huddle", recipient_field=True, id_field=True, ) update_model_ids(Recipient, data, "recipient") bulk_import_model(data, Recipient) bulk_set_users_or_streams_recipient_fields(Stream, Stream.objects.filter(realm=realm)) bulk_set_users_or_streams_recipient_fields(UserProfile, UserProfile.objects.filter(realm=realm)) re_map_foreign_keys(data, "zerver_subscription", "user_profile", related_table="user_profile") get_huddles_from_subscription(data, "zerver_subscription") re_map_foreign_keys(data, "zerver_subscription", "recipient", related_table="recipient") update_model_ids(Subscription, data, "subscription") fix_subscriptions_is_user_active_column(data, user_profiles) bulk_import_model(data, Subscription) if "zerver_realmauditlog" in data: fix_datetime_fields(data, "zerver_realmauditlog") re_map_foreign_keys(data, "zerver_realmauditlog", "realm", related_table="realm") re_map_foreign_keys( data, "zerver_realmauditlog", "modified_user", related_table="user_profile" ) re_map_foreign_keys( data, "zerver_realmauditlog", "acting_user", related_table="user_profile" ) re_map_foreign_keys(data, "zerver_realmauditlog", "modified_stream", related_table="stream") update_model_ids(RealmAuditLog, data, related_table="realmauditlog") bulk_import_model(data, RealmAuditLog) else: logging.info("about to call create_subscription_events") create_subscription_events( data=data, realm_id=realm.id, ) logging.info("done with create_subscription_events") # Ensure the invariant that there's always a realm-creation audit # log event, even if the export was generated by an export tool # that does not create RealmAuditLog events. 
if not RealmAuditLog.objects.filter( realm=realm, event_type=RealmAuditLog.REALM_CREATED ).exists(): RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created, # Mark these as backfilled, since they weren't created # when the realm was actually created, and thus do not # have the creating user associated with them. backfilled=True, ) if "zerver_huddle" in data: process_huddle_hash(data, "zerver_huddle") bulk_import_model(data, Huddle) for huddle in Huddle.objects.filter(recipient_id=None): recipient = Recipient.objects.get(type=Recipient.HUDDLE, type_id=huddle.id) huddle.recipient = recipient huddle.save(update_fields=["recipient"]) if "zerver_alertword" in data: re_map_foreign_keys(data, "zerver_alertword", "user_profile", related_table="user_profile") re_map_foreign_keys(data, "zerver_alertword", "realm", related_table="realm") update_model_ids(AlertWord, data, "alertword") bulk_import_model(data, AlertWord) if "zerver_userhotspot" in data: fix_datetime_fields(data, "zerver_userhotspot") re_map_foreign_keys(data, "zerver_userhotspot", "user", related_table="user_profile") update_model_ids(UserHotspot, data, "userhotspot") bulk_import_model(data, UserHotspot) if "zerver_usertopic" in data: fix_datetime_fields(data, "zerver_usertopic") re_map_foreign_keys(data, "zerver_usertopic", "user_profile", related_table="user_profile") re_map_foreign_keys(data, "zerver_usertopic", "stream", related_table="stream") re_map_foreign_keys(data, "zerver_usertopic", "recipient", related_table="recipient") update_model_ids(UserTopic, data, "usertopic") bulk_import_model(data, UserTopic) if "zerver_muteduser" in data: fix_datetime_fields(data, "zerver_muteduser") re_map_foreign_keys(data, "zerver_muteduser", "user_profile", related_table="user_profile") re_map_foreign_keys(data, "zerver_muteduser", "muted_user", related_table="user_profile") update_model_ids(MutedUser, data, "muteduser") bulk_import_model(data, MutedUser) if 
"zerver_service" in data: re_map_foreign_keys(data, "zerver_service", "user_profile", related_table="user_profile") fix_service_tokens(data, "zerver_service") update_model_ids(Service, data, "service") bulk_import_model(data, Service) if "zerver_usergroup" in data: re_map_foreign_keys(data, "zerver_usergroup", "realm", related_table="realm") re_map_foreign_keys_many_to_many( data, "zerver_usergroup", "direct_members", related_table="user_profile" ) re_map_foreign_keys_many_to_many( data, "zerver_usergroup", "direct_subgroups", related_table="usergroup" ) update_model_ids(UserGroup, data, "usergroup") bulk_import_model(data, UserGroup) re_map_foreign_keys( data, "zerver_usergroupmembership", "user_group", related_table="usergroup" ) re_map_foreign_keys( data, "zerver_usergroupmembership", "user_profile", related_table="user_profile" ) update_model_ids(UserGroupMembership, data, "usergroupmembership") bulk_import_model(data, UserGroupMembership) re_map_foreign_keys( data, "zerver_groupgroupmembership", "supergroup", related_table="usergroup" ) re_map_foreign_keys( data, "zerver_groupgroupmembership", "subgroup", related_table="usergroup" ) update_model_ids(GroupGroupMembership, data, "groupgroupmembership") bulk_import_model(data, GroupGroupMembership) if "zerver_botstoragedata" in data: re_map_foreign_keys( data, "zerver_botstoragedata", "bot_profile", related_table="user_profile" ) update_model_ids(BotStorageData, data, "botstoragedata") bulk_import_model(data, BotStorageData) if "zerver_botconfigdata" in data: re_map_foreign_keys( data, "zerver_botconfigdata", "bot_profile", related_table="user_profile" ) update_model_ids(BotConfigData, data, "botconfigdata") bulk_import_model(data, BotConfigData) if "zerver_realmuserdefault" in data: re_map_foreign_keys(data, "zerver_realmuserdefault", "realm", related_table="realm") update_model_ids(RealmUserDefault, data, "realmuserdefault") bulk_import_model(data, RealmUserDefault) # Create RealmUserDefault table with default 
values if not created # already from the import data; this can happen when importing # data from another product. if not RealmUserDefault.objects.filter(realm=realm).exists(): RealmUserDefault.objects.create(realm=realm) fix_datetime_fields(data, "zerver_userpresence") re_map_foreign_keys(data, "zerver_userpresence", "user_profile", related_table="user_profile") re_map_foreign_keys(data, "zerver_userpresence", "client", related_table="client") re_map_foreign_keys(data, "zerver_userpresence", "realm", related_table="realm") update_model_ids(UserPresence, data, "user_presence") bulk_import_model(data, UserPresence) fix_datetime_fields(data, "zerver_useractivity") re_map_foreign_keys(data, "zerver_useractivity", "user_profile", related_table="user_profile") re_map_foreign_keys(data, "zerver_useractivity", "client", related_table="client") update_model_ids(UserActivity, data, "useractivity") bulk_import_model(data, UserActivity) fix_datetime_fields(data, "zerver_useractivityinterval") re_map_foreign_keys( data, "zerver_useractivityinterval", "user_profile", related_table="user_profile" ) update_model_ids(UserActivityInterval, data, "useractivityinterval") bulk_import_model(data, UserActivityInterval) re_map_foreign_keys(data, "zerver_customprofilefield", "realm", related_table="realm") update_model_ids(CustomProfileField, data, related_table="customprofilefield") bulk_import_model(data, CustomProfileField) re_map_foreign_keys( data, "zerver_customprofilefieldvalue", "user_profile", related_table="user_profile" ) re_map_foreign_keys( data, "zerver_customprofilefieldvalue", "field", related_table="customprofilefield" ) fix_customprofilefield(data) update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue") bulk_import_model(data, CustomProfileFieldValue) # Import uploaded files and avatars import_uploads(realm, os.path.join(import_dir, "avatars"), processes, processing_avatars=True) import_uploads(realm, os.path.join(import_dir, "uploads"), 
processes) # We need to have this check as the emoji files are only present in the data # importer from Slack # For Zulip export, this doesn't exist if os.path.exists(os.path.join(import_dir, "emoji")): import_uploads(realm, os.path.join(import_dir, "emoji"), processes, processing_emojis=True) if os.path.exists(os.path.join(import_dir, "realm_icons")): import_uploads( realm, os.path.join(import_dir, "realm_icons"), processes, processing_realm_icons=True ) sender_map = {user["id"]: user for user in data["zerver_userprofile"]} # Import zerver_message and zerver_usermessage import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir) re_map_foreign_keys(data, "zerver_reaction", "message", related_table="message") re_map_foreign_keys(data, "zerver_reaction", "user_profile", related_table="user_profile") re_map_realm_emoji_codes(data, table_name="zerver_reaction") update_model_ids(Reaction, data, "reaction") bulk_import_model(data, Reaction) # Similarly, we need to recalculate the first_message_id for stream objects. for stream in Stream.objects.filter(realm=realm): recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id) first_message = Message.objects.filter(recipient=recipient).first() if first_message is None: stream.first_message_id = None else: stream.first_message_id = first_message.id stream.save(update_fields=["first_message_id"]) if "zerver_userstatus" in data: fix_datetime_fields(data, "zerver_userstatus") re_map_foreign_keys(data, "zerver_userstatus", "user_profile", related_table="user_profile") re_map_foreign_keys(data, "zerver_userstatus", "client", related_table="client") update_model_ids(UserStatus, data, "userstatus") re_map_realm_emoji_codes(data, table_name="zerver_userstatus") bulk_import_model(data, UserStatus) # Do attachments AFTER message data is loaded. # TODO: de-dup how we read these json files. 
fn = os.path.join(import_dir, "attachment.json") if not os.path.exists(fn): raise Exception("Missing attachment.json file!") logging.info("Importing attachment data from %s", fn) with open(fn, "rb") as f: data = orjson.loads(f.read()) import_attachments(data) # Import the analytics file. import_analytics_data(realm=realm, import_dir=import_dir) if settings.BILLING_ENABLED: do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=None) else: do_change_realm_plan_type(realm, Realm.PLAN_TYPE_SELF_HOSTED, acting_user=None) return realm # create_users and do_import_system_bots differ from their equivalent # in zerver/lib/server_initialization.py because here we check if the # bots don't already exist and only then create a user for these bots. def do_import_system_bots(realm: Any) -> None: internal_bots = [ (bot["name"], bot["email_template"] % (settings.INTERNAL_BOT_DOMAIN,)) for bot in settings.INTERNAL_BOTS ] create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT) print("Finished importing system bots.") def create_users( realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int] = None ) -> None: user_set = set() for full_name, email in name_list: if not UserProfile.objects.filter(email=email): user_set.add((email, full_name, True)) bulk_create_users(realm, user_set, bot_type) def update_message_foreign_keys(import_dir: Path, sort_by_date: bool) -> None: old_id_list = get_incoming_message_ids( import_dir=import_dir, sort_by_date=sort_by_date, ) count = len(old_id_list) new_id_list = allocate_ids(model_class=Message, count=count) for old_id, new_id in zip(old_id_list, new_id_list): update_id_map( table="message", old_id=old_id, new_id=new_id, ) # We don't touch user_message keys here; that happens later when # we're actually read the files a second time to get actual data. 
def get_incoming_message_ids(import_dir: Path, sort_by_date: bool) -> List[int]:
    """
    This function reads in our entire collection of message
    ids, which can be millions of integers for some installations.
    And then we sort the list.  This is necessary to ensure
    that the sort order of incoming ids matches the sort order
    of date_sent, which isn't always guaranteed by our
    utilities that convert third party chat data.  We also
    need to move our ids to a new range if we're dealing
    with a server that has data for other realms.
    """
    if sort_by_date:
        # (date_sent, message_id) pairs; sorted at the end so ids
        # come out in chronological order.
        tups: List[Tuple[int, int]] = []
    else:
        message_ids: List[int] = []

    dump_file_id = 1
    while True:
        # Message dumps are numbered messages-000001.json, messages-000002.json, ...;
        # stop at the first gap.
        message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
        if not os.path.exists(message_filename):
            break

        with open(message_filename, "rb") as f:
            data = orjson.loads(f.read())

        # Aggressively free up memory.
        del data["zerver_usermessage"]
        for row in data["zerver_message"]:
            # We truncate date_sent to int to theoretically
            # save memory and speed up the sort.  For
            # Zulip-to-Zulip imports, the
            # message_id will generally be a good tiebreaker.
            # If we occasionally mis-order the ids for two
            # messages from the same second, it's not the
            # end of the world, as it's likely those messages
            # arrived to the original server in somewhat
            # arbitrary order.
            message_id = row["id"]

            if sort_by_date:
                date_sent = int(row["date_sent"])
                tup = (date_sent, message_id)
                tups.append(tup)
            else:
                message_ids.append(message_id)

        dump_file_id += 1

    if sort_by_date:
        tups.sort()
        message_ids = [tup[1] for tup in tups]

    return message_ids


def import_message_data(realm: Realm, sender_map: Dict[int, Record], import_dir: Path) -> None:
    """Import all messages-NNNNNN.json dumps for the realm: remap foreign
    keys, re-render content, and bulk-insert Message and UserMessage rows.

    Assumes update_message_foreign_keys() has already populated
    ID_MAP["message"] for every incoming message id.
    """
    dump_file_id = 1
    while True:
        message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
        if not os.path.exists(message_filename):
            break

        with open(message_filename, "rb") as f:
            data = orjson.loads(f.read())

        logging.info("Importing message dump %s", message_filename)
        re_map_foreign_keys(data, "zerver_message", "sender", related_table="user_profile")
        re_map_foreign_keys(data, "zerver_message", "recipient", related_table="recipient")
        re_map_foreign_keys(data, "zerver_message", "sending_client", related_table="client")
        fix_datetime_fields(data, "zerver_message")
        # Parser to update message content with the updated attachment URLs
        fix_upload_links(data, "zerver_message")

        # We already create mappings for zerver_message ids
        # in update_message_foreign_keys(), so here we simply
        # apply them.
        message_id_map = ID_MAP["message"]
        for row in data["zerver_message"]:
            row["id"] = message_id_map[row["id"]]

        for row in data["zerver_usermessage"]:
            assert row["message"] in message_id_map

        fix_message_rendered_content(
            realm=realm,
            sender_map=sender_map,
            messages=data["zerver_message"],
        )
        logging.info("Successfully rendered Markdown for message batch")

        # A LOT HAPPENS HERE.
        # This is where we actually import the message data.
        bulk_import_model(data, Message)

        # Due to the structure of these message chunks, we're
        # guaranteed to have already imported all the Message objects
        # for this batch of UserMessage objects.
        re_map_foreign_keys(data, "zerver_usermessage", "message", related_table="message")
        re_map_foreign_keys(
            data, "zerver_usermessage", "user_profile", related_table="user_profile"
        )
        fix_bitfield_keys(data, "zerver_usermessage", "flags")

        bulk_import_user_message_data(data, dump_file_id)
        dump_file_id += 1


def import_attachments(data: TableData) -> None:
    """Import zerver_attachment rows plus the attachment<->message
    many-to-many table, which Django models asymmetrically.

    Must run AFTER message data is imported, since it resolves message
    ids through ID_MAP["message"].
    """
    # Clean up the data in zerver_attachment that is not
    # relevant to our many-to-many import.
    fix_datetime_fields(data, "zerver_attachment")
    re_map_foreign_keys(data, "zerver_attachment", "owner", related_table="user_profile")
    re_map_foreign_keys(data, "zerver_attachment", "realm", related_table="realm")

    # Configure ourselves.  Django models many-to-many (m2m)
    # relations asymmetrically. The parent here refers to the
    # Model that has the ManyToManyField.  It is assumed here
    # the child models have been loaded, but we are in turn
    # responsible for loading the parents and the m2m rows.
    parent_model = Attachment
    parent_db_table_name = "zerver_attachment"
    parent_singular = "attachment"
    child_singular = "message"
    child_plural = "messages"
    m2m_table_name = "zerver_attachment_messages"
    parent_id = "attachment_id"
    child_id = "message_id"

    update_model_ids(parent_model, data, "attachment")
    # We don't bulk_import_model yet, because we need to first compute
    # the many-to-many for this table.

    # First, build our list of many-to-many (m2m) rows.
    # We do this in a slightly convoluted way to anticipate
    # a future where we may need to call re_map_foreign_keys.
    m2m_rows: List[Record] = []
    for parent_row in data[parent_db_table_name]:
        for fk_id in parent_row[child_plural]:
            m2m_row: Record = {}
            m2m_row[parent_singular] = parent_row["id"]
            m2m_row[child_singular] = ID_MAP["message"][fk_id]
            m2m_rows.append(m2m_row)

    # Create our table data for insert.
    m2m_data: TableData = {m2m_table_name: m2m_rows}
    convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
    convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
    m2m_rows = m2m_data[m2m_table_name]

    # Next, delete out our child data from the parent rows.
    for parent_row in data[parent_db_table_name]:
        del parent_row[child_plural]

    # Update 'path_id' for the attachments
    for attachment in data[parent_db_table_name]:
        attachment["path_id"] = path_maps["attachment_path"][attachment["path_id"]]

    # Next, load the parent rows.
    bulk_import_model(data, parent_model)

    # Now, go back to our m2m rows.
    # TODO: Do this the kosher Django way.  We may find a
    # better way to do this in Django 1.9 particularly.
    with connection.cursor() as cursor:
        sql_template = SQL(
            """
            INSERT INTO {m2m_table_name} ({parent_id}, {child_id}) VALUES %s
        """
        ).format(
            m2m_table_name=Identifier(m2m_table_name),
            parent_id=Identifier(parent_id),
            child_id=Identifier(child_id),
        )
        tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
        execute_values(cursor.cursor, sql_template, tups)

    logging.info("Successfully imported M2M table %s", m2m_table_name)


def import_analytics_data(realm: Realm, import_dir: Path) -> None:
    """Import RealmCount/UserCount/StreamCount analytics rows from
    analytics.json, if the export included one (it is optional)."""
    analytics_filename = os.path.join(import_dir, "analytics.json")
    if not os.path.exists(analytics_filename):
        return

    logging.info("Importing analytics data from %s", analytics_filename)
    with open(analytics_filename, "rb") as f:
        data = orjson.loads(f.read())

    # Process the data through the fixer functions.
    fix_datetime_fields(data, "analytics_realmcount")
    re_map_foreign_keys(data, "analytics_realmcount", "realm", related_table="realm")
    update_model_ids(RealmCount, data, "analytics_realmcount")
    bulk_import_model(data, RealmCount)

    fix_datetime_fields(data, "analytics_usercount")
    re_map_foreign_keys(data, "analytics_usercount", "realm", related_table="realm")
    re_map_foreign_keys(data, "analytics_usercount", "user", related_table="user_profile")
    update_model_ids(UserCount, data, "analytics_usercount")
    bulk_import_model(data, UserCount)

    fix_datetime_fields(data, "analytics_streamcount")
    re_map_foreign_keys(data, "analytics_streamcount", "realm", related_table="realm")
    re_map_foreign_keys(data, "analytics_streamcount", "stream", related_table="stream")
    update_model_ids(StreamCount, data, "analytics_streamcount")
    bulk_import_model(data, StreamCount)
zulip/zulip
zerver/lib/import_realm.py
Python
apache-2.0
62,551
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= enthought library imports =======================
# ============= standard library imports ========================
import os
import re

import six
from lxml.etree import (
    ElementTree,
    Element,
    ParseError,
    XML,
    XMLSyntaxError,
    tostring,
    XMLParser as LXMLParser,
)

# ============= local library imports  ==========================

# xml tokenizer pattern: matches, in order, a tag name (group 1), an
# entity reference (group 2), bare text (group 3), whitespace (group 4),
# or any single leftover character (group 5).
xml = re.compile("<([/?!]?\w+)|&(#?\w+);|([^<>&'\"=\s]+)|(\s+)|(.)")


def scan(txt, target):
    """Generator yielding (code, text) token pairs for the XML text ``txt``.

    ``code`` is the index of the regex group that matched (1=tag, 2=entity,
    3=text, 4=whitespace, 5=other).  ``target`` is unused.
    """

    def gettoken(space=0, scan=xml.scanner(txt).match):
        # Pull the next token; when `space` is falsy, whitespace tokens
        # (code 4) are returned rather than skipped.
        try:
            while 1:
                m = scan()
                code = m.lastindex
                text = m.group(m.lastindex)
                if not space or code != 4:
                    return code, text
        except AttributeError:
            # scan() returned None: input exhausted.
            raise EOFError

    try:
        while 1:
            # cc, tt = gettoken()
            yield gettoken()
    except EOFError:
        pass
    except SyntaxError as v:
        raise


def pprint_xml(txt):
    """Return a pretty-printed (indented) version of the XML string ``txt``.

    Tokenizes with scan() and rebuilds lines, indenting by element depth;
    blank lines are dropped from the result.
    """
    line = []
    lines = []
    indent = " "
    stack = []  # open-tag stack; its depth drives the indentation
    skip_next = False
    for c, t in scan(txt, None):
        # print c, t, len(t)
        # print t, ord(t[-1]), ord('\n')
        # if t.endswith('\n'):
        #     continue
        t = t.rstrip()
        # if not t:
        #     continue
        # t = t.strip()
        # print c, t, line, stack
        if skip_next:
            skip_next = False
            continue

        if c == 1:
            if t.startswith("/"):
                # Closing tag: pop the matching open tag and flush the line.
                stack.pop()
                line.append("<{}>".format(t))
                lines.append("{}{}".format(indent * len(stack), "".join(line).strip()))
                line = []
                # Skip the '>' token that follows the close-tag name.
                skip_next = True
                continue
            else:
                # Opening tag: flush any pending line, then start a new one.
                lines.append(
                    "{}{}".format(indent * (len(stack) - 1), "".join(line).strip())
                )
                line = []
                if not t.startswith("?xml"):
                    # XML declarations don't nest, so keep them off the stack.
                    stack.append(t)
                line.append("<{}".format(t))
        # if not line and c == 1:
        #     line.append('<{}'.format(t))
        #     continue
        else:
            if c == 4:
                # Collapse runs of whitespace to a single space.
                t = " "
            line.append(t)

    if line:
        lines.append("".join(line).strip())
    # print '-------------------'
    # for li in lines:
    #     print li
    # lines[0]=lines[0].lstrip()
    return "\n".join([li for li in lines if li.strip()])


def indent(elem, level=0):
    """Recursively adjust ``text``/``tail`` of an lxml element tree in place
    so serializing it produces indented output (classic ElementTree recipe)."""
    i = "\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for ei in elem:
            indent(ei, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i


class XMLParser(object):
    """Thin convenience wrapper around lxml.etree for loading, building,
    and saving XML documents."""

    _root = None  # lxml root Element, set by _parse_file() or __init__
    path = None  # file path this document was loaded from / saves to
    _syntax_error = None  # last XMLSyntaxError message, if parsing failed

    def __init__(self, path=None, *args, **kw):
        if path:
            self.path = path
            if path.endswith(".xml"):
                try:
                    self._parse_file(path)
                except ParseError as e:
                    # GUI warning; imported lazily so non-GUI use doesn't
                    # require pyface.
                    from pyface.message_dialog import warning

                    warning(None, str(e))
        else:
            # No path given: start with an empty document.
            self._root = Element("root")

    def _parse_file(self, p):
        """Parse ``p`` (a path string or file-like object) into self._root.

        Returns True on success; on syntax error records the message in
        self._syntax_error and returns None.
        """
        txt = None
        if isinstance(p, (str, six.text_type)):
            txt = ""
            if os.path.isfile(p):
                with open(p, "rb") as rfile:
                    txt = rfile.read()

        if txt is None:
            # Not a string: assume a file-like object.
            txt = p.read()

        try:
            self._root = XML(txt, parser=LXMLParser(remove_blank_text=True))
            return True
        except XMLSyntaxError as e:
            print("Syntax error", p, e)
            self._syntax_error = str(e)
            print("asdfasdfas", p, self._syntax_error)

    def load(self, rfile):
        """Load from a path or file-like object; see _parse_file()."""
        return self._parse_file(rfile)

    def add(self, tag, value, root=None, **kw):
        """Create a new element and append it to ``root`` (default: the
        document root).  Returns the new element."""
        if root is None:
            root = self._root
        elem = self.new_element(tag, value, **kw)
        root.append(elem)
        return elem

    def new_element(self, tag, value, **kw):
        """Build a detached Element; ``kw`` become attributes, ``value``
        (if not empty/None) becomes the text."""
        e = Element(tag, attrib=kw)
        if value not in ("", None):
            e.text = str(value)
        return e

    def get_root(self):
        return self._root

    def get_tree(self):
        return ElementTree(self._root)

    def save(self, p=None, pretty_print=True):
        """Write the document to ``p`` (default: self.path), indenting it
        first.  Silently does nothing if the target directory is missing."""
        if p is None:
            p = self.path
        if p and os.path.isdir(os.path.dirname(p)):
            indent(self._root)
            tree = self.get_tree()
            tree.write(p, xml_declaration=True, method="xml", pretty_print=pretty_print)

    def tostring(self, pretty_print=True):
        """Serialize the document to a string (None if there is no tree)."""
        tree = self.get_tree()
        if tree:
            return tostring(tree, pretty_print=pretty_print)

    def get_elements(self, name=None):
        """Return all elements anywhere in the document with tag ``name``
        (XPath '//name')."""
        root = self.get_root()
        path = "//{}".format(name)
        return root.xpath(path)
        # return self._get_elements(None, True, name)

    def _get_elements(self, group, element, name):
        # Legacy helper: returns child elements (or their stripped text when
        # ``element`` is falsy) matching ``name`` under ``group``.
        if group is None:
            group = self.get_root()
        return [v if element else v.text.strip() for v in group.findall(name)]


# class XMLParser2(object):
#     '''
#         wrapper for ElementTree
#     '''
#     _tree = None
#
#     def __init__(self, path=None, *args, **kw):
#         self._tree = ElementTree()
#         if path:
#             self._path = path
#             try:
#                 self._parse_file(path)
#             except ParseError, e:
#                 warning(None, str(e))
#
#     def load(self, fp):
#         '''
#             path or file-like object
#         '''
#         return self._parse_file(fp)
#
#     def _parse_file(self, p):
#         self._tree.parse(p)
#
#     def get_tree(self):
#         return self._tree
#
#     def save(self, p=None):
#         if p is None:
#             p = self._path
#
#         if p and os.path.isdir(os.path.dirname(p)):
#             # self.indent(self._tree.getroot())
#             self._tree.write(p, pretty_print=True)
#
#     # def indent(self, elem, level=0):
#     #     i = '\n' + level * ' '
#     #     if len(elem):
#     #         if not elem.text or not elem.text.strip():
#     #             elem.text = i + ' '
#     #         if not elem.tail or not elem.tail.strip():
#     #             elem.tail = i
#     #         for elem in elem:
#     #             self.indent(elem, level + 1)
#     #         if not elem.tail or not elem.tail.strip():
#     #             elem.tail = i
#     #     else:
#     #         if level and (not elem.tail or not elem.tail.strip()):
#     #             elem.tail = i
#
#     def add_element(self, tag, value, root, **kw):
#         if root is None:
#             root = self._tree.getroot()
#         elem = self.new_element(tag, value, **kw)
#         root.append(elem)
#         return elem
#
#     def new_element(self, tag, value, **kw):
#         e = Element(tag, attrib=kw)
#         # if value:
#         #     e.text = value
#         return e
# ============= EOF ====================================
USGSDenverPychron/pychron
pychron/core/xml/xml_parser.py
Python
apache-2.0
8,717
package org.testng.internal.thread;

import org.testng.ITestNGMethod;

/**
 * Exception used to signal a thread timeout, i.e. a test method that failed
 * to complete within its configured time-out.
 */
public class ThreadTimeoutException extends Exception {

  private static final long serialVersionUID = 7009400729783393548L;

  /** Creates an exception carrying the given detail message. */
  public ThreadTimeoutException(String msg) {
    super(msg);
  }

  /** Creates an exception wrapping the given underlying cause. */
  public ThreadTimeoutException(Throwable cause) {
    super(cause);
  }

  /**
   * Creates an exception describing which test method timed out and the
   * time-out (in milliseconds) that was exceeded.
   */
  public ThreadTimeoutException(ITestNGMethod tm, long timeout) {
    this(describe(tm, timeout));
  }

  // Builds the exact detail message the inline concatenation used to produce.
  private static String describe(ITestNGMethod tm, long timeout) {
    return "Method " + tm.getQualifiedName() + "() didn't finish within the time-out " + timeout;
  }
}
krmahadevan/testng
testng-core-api/src/main/java/org/testng/internal/thread/ThreadTimeoutException.java
Java
apache-2.0
558
/*
 * Copyright 2011 The Poderosa Project.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 *
 * $Id: PrivateKeyFileHeader.cs,v 1.1 2011/11/03 16:27:38 kzmi Exp $
 */
using System;
using System.Collections.Generic;
using System.Text;

namespace Granados.Poderosa.KeyFormat {

    /// <summary>
    /// Header and footer of the SSH private key file
    /// </summary>
    /// <remarks>
    /// These constants are the literal marker lines used to recognize which
    /// on-disk private key format a file is in. They must match the producing
    /// tools byte-for-byte, so do not reformat them.
    /// </remarks>
    internal static class PrivateKeyFileHeader {

        /// <summary>Magic string (including trailing newline) at the start of an SSH1 private key file.</summary>
        public const string SSH1_HEADER = "SSH PRIVATE KEY FILE FORMAT 1.1\n";

        /// <summary>PEM-style header line of an OpenSSH RSA private key.</summary>
        public const string SSH2_OPENSSH_HEADER_RSA = "-----BEGIN RSA PRIVATE KEY-----";

        /// <summary>PEM-style header line of an OpenSSH DSA private key.</summary>
        public const string SSH2_OPENSSH_HEADER_DSA = "-----BEGIN DSA PRIVATE KEY-----";

        /// <summary>Header line of an ssh.com-format SSH2 private key.</summary>
        public const string SSH2_SSHCOM_HEADER = "---- BEGIN SSH2 ENCRYPTED PRIVATE KEY ----";

        /// <summary>Footer line of an ssh.com-format SSH2 private key.</summary>
        public const string SSH2_SSHCOM_FOOTER = "---- END SSH2 ENCRYPTED PRIVATE KEY ----";

        /// <summary>First-line prefix of a PuTTY version 1 key file.</summary>
        public const string SSH2_PUTTY_HEADER_1 = "PuTTY-User-Key-File-1:";

        /// <summary>First-line prefix of a PuTTY version 2 key file.</summary>
        public const string SSH2_PUTTY_HEADER_2 = "PuTTY-User-Key-File-2:";
    }
}
ArsenShnurkov/poderosa
Granados/Poderosa/KeyFormat/PrivateKeyFileHeader.cs
C#
apache-2.0
1,118
# -*- coding: utf-8 -*- import mock import pytest from urlparse import urlparse from api.base.settings.defaults import API_BASE from framework.auth.core import Auth from osf.models import NodeLog from osf.models.licenses import NodeLicense from osf_tests.factories import ( NodeFactory, ProjectFactory, RegistrationFactory, AuthUserFactory, CollectionFactory, CommentFactory, NodeLicenseRecordFactory, PrivateLinkFactory, PreprintFactory, IdentifierFactory, ) from rest_framework import exceptions from tests.base import fake from tests.utils import assert_items_equal, assert_latest_log, assert_latest_log_not from website.views import find_bookmark_collection from website.util import permissions from website.util.sanitize import strip_html @pytest.fixture() def user(): return AuthUserFactory() @pytest.mark.django_db class TestNodeDetail: @pytest.fixture() def user_two(self): return AuthUserFactory() @pytest.fixture() def project_public(self, user): return ProjectFactory( title='Project One', is_public=True, creator=user) @pytest.fixture() def project_private(self, user): return ProjectFactory( title='Project Two', is_public=False, creator=user) @pytest.fixture() def component_public(self, user, project_public): return NodeFactory(parent=project_public, creator=user, is_public=True) @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/'.format(API_BASE, project_private._id) @pytest.fixture() def url_component_public(self, component_public): return '/{}nodes/{}/'.format(API_BASE, component_public._id) @pytest.fixture() def permissions_read(self): return ['read'] @pytest.fixture() def permissions_write(self): return ['read', 'write'] @pytest.fixture() def permissions_admin(self): return ['read', 'admin', 'write'] def test_return_project_details( self, app, user, user_two, project_public, project_private, url_public, url_private, 
permissions_read, permissions_admin): # test_return_public_project_details_logged_out res = app.get(url_public) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_public.title assert res.json['data']['attributes']['description'] == project_public.description assert res.json['data']['attributes']['category'] == project_public.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_read) # test_return_public_project_details_contributor_logged_in res = app.get(url_public, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_public.title assert res.json['data']['attributes']['description'] == project_public.description assert res.json['data']['attributes']['category'] == project_public.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_admin) # test_return_public_project_details_non_contributor_logged_in res = app.get(url_public, auth=user_two.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_public.title assert res.json['data']['attributes']['description'] == project_public.description assert res.json['data']['attributes']['category'] == project_public.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_read) # test_return_private_project_details_logged_in_admin_contributor res = app.get(url_private, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_private.title assert res.json['data']['attributes']['description'] == project_private.description assert res.json['data']['attributes']['category'] == project_private.category 
assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_admin) # test_return_private_project_details_logged_out res = app.get(url_private, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_return_private_project_details_logged_in_non_contributor res = app.get(url_private, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] def test_return_private_project_details_logged_in_write_contributor( self, app, user, user_two, project_private, url_private, permissions_write): project_private.add_contributor( contributor=user_two, auth=Auth(user), save=True) res = app.get(url_private, auth=user_two.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_private.title assert res.json['data']['attributes']['description'] == project_private.description assert res.json['data']['attributes']['category'] == project_private.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_write) def test_top_level_project_has_no_parent(self, app, url_public): res = app.get(url_public) assert res.status_code == 200 assert 'parent' not in res.json['data']['relationships'] assert 'id' in res.json['data'] assert res.content_type == 'application/vnd.api+json' def test_child_project_has_parent( self, app, user, project_public, url_public): public_component = NodeFactory( parent=project_public, creator=user, is_public=True) public_component_url = '/{}nodes/{}/'.format( API_BASE, public_component._id) res = app.get(public_component_url) assert res.status_code == 200 url = res.json['data']['relationships']['parent']['links']['related']['href'] assert urlparse(url).path == url_public def test_node_has(self, app, url_public): # test_node_has_children_link res = app.get(url_public) url = 
res.json['data']['relationships']['children']['links']['related']['href'] expected_url = '{}children/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_contributors_link res = app.get(url_public) url = res.json['data']['relationships']['contributors']['links']['related']['href'] expected_url = '{}contributors/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_node_links_link res = app.get(url_public) url = res.json['data']['relationships']['node_links']['links']['related']['href'] expected_url = '{}node_links/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_registrations_link res = app.get(url_public) url = res.json['data']['relationships']['registrations']['links']['related']['href'] expected_url = '{}registrations/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_files_link res = app.get(url_public) url = res.json['data']['relationships']['files']['links']['related']['href'] expected_url = '{}files/'.format(url_public) assert urlparse(url).path == expected_url def test_node_has_comments_link( self, app, user, project_public, url_public): CommentFactory(node=project_public, user=user) res = app.get(url_public) assert res.status_code == 200 assert 'comments' in res.json['data']['relationships'].keys() url = res.json['data']['relationships']['comments']['links']['related']['href'] res = app.get(url) assert res.status_code == 200 assert res.json['data'][0]['type'] == 'comments' def test_node_comments_link_query_params_formatted( self, app, user, project_public, project_private, url_private): CommentFactory(node=project_public, user=user) project_private_link = PrivateLinkFactory(anonymous=False) project_private_link.nodes.add(project_private) project_private_link.save() res = app.get(url_private, auth=user.auth) url = res.json['data']['relationships']['comments']['links']['related']['href'] assert project_private_link.key not in url res = app.get( 
'{}?view_only={}'.format( url_private, project_private_link.key)) url = res.json['data']['relationships']['comments']['links']['related']['href'] assert project_private_link.key in url def test_node_has_correct_unread_comments_count( self, app, user, project_public, url_public): contributor = AuthUserFactory() project_public.add_contributor( contributor=contributor, auth=Auth(user), save=True) CommentFactory( node=project_public, user=contributor, page='node') res = app.get( '{}?related_counts=True'.format(url_public), auth=user.auth) unread = res.json['data']['relationships']['comments']['links']['related']['meta']['unread'] unread_comments_node = unread['node'] assert unread_comments_node == 1 def test_node_properties(self, app, url_public): res = app.get(url_public) assert res.json['data']['attributes']['public'] is True assert res.json['data']['attributes']['registration'] is False assert res.json['data']['attributes']['collection'] is False assert res.json['data']['attributes']['tags'] == [] def test_requesting_folder_returns_error(self, app, user): folder = CollectionFactory(creator=user) res = app.get( '/{}nodes/{}/'.format(API_BASE, folder._id), auth=user.auth, expect_errors=True ) assert res.status_code == 404 def test_cannot_return_registrations_at_node_detail_endpoint( self, app, user, project_public): registration = RegistrationFactory( project=project_public, creator=user) res = app.get('/{}nodes/{}/'.format( API_BASE, registration._id), auth=user.auth, expect_errors=True) assert res.status_code == 404 def test_cannot_return_folder_at_node_detail_endpoint(self, app, user): folder = CollectionFactory(creator=user) res = app.get( '/{}nodes/{}/'.format(API_BASE, folder._id), auth=user.auth, expect_errors=True) assert res.status_code == 404 @pytest.mark.django_db class NodeCRUDTestCase: @pytest.fixture() def user_two(self): return AuthUserFactory() @pytest.fixture() def title(self): return 'Cool Project' @pytest.fixture() def title_new(self): return 'Super 
Cool Project' @pytest.fixture() def description(self): return 'A Properly Cool Project' @pytest.fixture() def description_new(self): return 'An even cooler project' @pytest.fixture() def category(self): return 'data' @pytest.fixture() def category_new(self): return 'project' @pytest.fixture() def project_public(self, user, title, description, category): return ProjectFactory( title=title, description=description, category=category, is_public=True, creator=user ) @pytest.fixture() def project_private(self, user, title, description, category): return ProjectFactory( title=title, description=description, category=category, is_public=False, creator=user ) @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/'.format(API_BASE, project_private._id) @pytest.fixture() def url_fake(self): return '/{}nodes/{}/'.format(API_BASE, '12345') @pytest.fixture() def make_node_payload(self): def payload(node, attributes): return { 'data': { 'id': node._id, 'type': 'nodes', 'attributes': attributes, } } return payload @pytest.mark.django_db class TestNodeUpdate(NodeCRUDTestCase): def test_node_update_invalid_data(self, app, user, url_public): res = app.put_json_api( url_public, 'Incorrect data', auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail res = app.put_json_api( url_public, ['Incorrect data'], auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail def test_cannot_make_project_public_if_non_contributor( self, app, project_private, url_private, make_node_payload): with assert_latest_log_not(NodeLog.MADE_PUBLIC, project_private): non_contrib = AuthUserFactory() res = app.patch_json( url_private, make_node_payload(project_private, {'public': True}), 
auth=non_contrib.auth, expect_errors=True ) assert res.status_code == 403 def test_cannot_make_project_public_if_non_admin_contributor( self, app, project_private, url_private, make_node_payload): non_admin = AuthUserFactory() project_private.add_contributor( non_admin, permissions=(permissions.READ, permissions.WRITE), auth=Auth(project_private.creator) ) project_private.save() res = app.patch_json( url_private, make_node_payload(project_private, {'public': True}), auth=non_admin.auth, expect_errors=True ) assert res.status_code == 403 project_private.reload() assert not project_private.is_public def test_can_make_project_public_if_admin_contributor( self, app, project_private, url_private, make_node_payload): with assert_latest_log(NodeLog.MADE_PUBLIC, project_private): admin_user = AuthUserFactory() project_private.add_contributor( admin_user, permissions=(permissions.READ, permissions.WRITE, permissions.ADMIN), auth=Auth(project_private.creator)) project_private.save() res = app.patch_json_api( url_private, make_node_payload(project_private, {'public': True}), auth=admin_user.auth # self.user is creator/admin ) assert res.status_code == 200 project_private.reload() assert project_private.is_public def test_update_errors( self, app, user, user_two, title_new, description_new, category_new, project_public, project_private, url_public, url_private): # test_update_project_properties_not_nested res = app.put_json_api(url_public, { 'id': project_public._id, 'type': 'nodes', 'title': title_new, 'description': description_new, 'category': category_new, 'public': True, }, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Request must include /data.' 
assert res.json['errors'][0]['source']['pointer'] == '/data' # test_update_invalid_id res = app.put_json_api(url_public, { 'data': { 'id': '12345', 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': True } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_update_invalid_type res = app.put_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'node', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': True } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_update_no_id res = app.put_json_api(url_public, { 'data': { 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': True } } }, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'This field may not be null.' assert res.json['errors'][0]['source']['pointer'] == '/data/id' # test_update_no_type res = app.put_json_api(url_public, { 'data': { 'id': project_public._id, 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': True } } }, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'This field may not be null.' 
assert res.json['errors'][0]['source']['pointer'] == '/data/type' # test_update_public_project_logged_out res = app.put_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': True } } }, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_update_project_invalid_title project = { 'data': { 'type': 'nodes', 'id': project_public._id, 'attributes': { 'title': 'A' * 201, 'category': 'project', } } } res = app.put_json_api( url_public, project, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Title cannot exceed 200 characters.' # test_update_public_project_logged_in_but_unauthorized res = app.put_json_api(url_public, { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': True } } }, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] # test_update_private_project_logged_out res = app.put_json_api(url_private, { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': False } } }, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_update_private_project_logged_in_non_contributor res = app.put_json_api(url_private, { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': False } } }, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] def test_update_public_project_logged_in( self, app, user, title_new, description_new, category_new, project_public, url_public): with 
assert_latest_log(NodeLog.UPDATED_FIELDS, project_public): res = app.put_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': True } } }, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == title_new assert res.json['data']['attributes']['description'] == description_new assert res.json['data']['attributes']['category'] == category_new def test_cannot_update_a_registration(self, app, user, project_public): registration = RegistrationFactory( project=project_public, creator=user) original_title = registration.title original_description = registration.description url = '/{}nodes/{}/'.format(API_BASE, registration._id) res = app.put_json_api(url, { 'data': { 'id': registration._id, 'type': 'nodes', 'attributes': { 'title': fake.catch_phrase(), 'description': fake.bs(), 'category': 'hypothesis', 'public': True } } }, auth=user.auth, expect_errors=True) registration.reload() assert res.status_code == 404 assert registration.title == original_title assert registration.description == original_description def test_update_private_project_logged_in_contributor( self, app, user, title_new, description_new, category_new, project_private, url_private): with assert_latest_log(NodeLog.UPDATED_FIELDS, project_private): res = app.put_json_api(url_private, { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': { 'title': title_new, 'description': description_new, 'category': category_new, 'public': False } } }, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == title_new assert res.json['data']['attributes']['description'] == description_new assert res.json['data']['attributes']['category'] == category_new def 
test_update_project_sanitizes_html_properly( self, app, user, category_new, project_public, url_public): with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public): """Post request should update resource, and any HTML in fields should be stripped""" new_title = '<strong>Super</strong> Cool Project' new_description = 'An <script>alert("even cooler")</script> project' res = app.put_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'nodes', 'attributes': { 'title': new_title, 'description': new_description, 'category': category_new, 'public': True, } } }, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == strip_html( new_title) assert res.json['data']['attributes']['description'] == strip_html( new_description) def test_partial_update_project_updates_project_correctly_and_sanitizes_html( self, app, user, description, category, project_public, url_public): with assert_latest_log(NodeLog.EDITED_TITLE, project_public): new_title = 'An <script>alert("even cooler")</script> project' res = app.patch_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'nodes', 'attributes': { 'title': new_title } } }, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' res = app.get(url_public) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == strip_html( new_title) assert res.json['data']['attributes']['description'] == description assert res.json['data']['attributes']['category'] == category def test_partial_update_public_project_logged_in( self, app, user, title_new, description, category, project_public, url_public): with assert_latest_log(NodeLog.EDITED_TITLE, project_public): res = app.patch_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'nodes', 'attributes': { 'title': title_new, } } }, auth=user.auth) 
assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == title_new assert res.json['data']['attributes']['description'] == description assert res.json['data']['attributes']['category'] == category def test_write_to_public_field_non_contrib_forbidden( self, app, user_two, project_public, url_public): # Test non-contrib writing to public field res = app.patch_json_api(url_public, { 'data': { 'attributes': { 'public': False}, 'id': project_public._id, 'type': 'nodes' } }, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] def test_partial_update_errors( self, app, user, user_two, title_new, project_public, project_private, url_public, url_private): # test_partial_update_public_project_logged_out res = app.patch_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'nodes', 'attributes': { 'title': title_new } } }, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_partial_update_public_project_logged_in_but_unauthorized # Public resource, logged in, unauthorized res = app.patch_json_api(url_public, { 'data': { 'attributes': { 'title': title_new}, 'id': project_public._id, 'type': 'nodes', } }, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] # test_partial_update_private_project_logged_out res = app.patch_json_api(url_private, { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': { 'title': title_new } } }, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_partial_update_private_project_logged_in_non_contributor res = app.patch_json_api(url_private, { 'data': { 'attributes': { 'title': title_new}, 'id': project_private._id, 'type': 'nodes', } }, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in 
res.json['errors'][0] # test_partial_update_invalid_id res = app.patch_json_api(url_public, { 'data': { 'id': '12345', 'type': 'nodes', 'attributes': { 'title': title_new, } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_partial_update_invalid_type res = app.patch_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'node', 'attributes': { 'title': title_new, } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_partial_update_no_id res = app.patch_json_api(url_public, { 'data': { 'type': 'nodes', 'attributes': { 'title': title_new, } } }, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'This field may not be null.' assert res.json['errors'][0]['source']['pointer'] == '/data/id' # test_partial_update_no_type res = app.patch_json_api(url_public, { 'data': { 'id': project_public._id, 'attributes': { 'title': title_new, } } }, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'This field may not be null.' 
assert res.json['errors'][0]['source']['pointer'] == '/data/type' # Nothing will be updated here # test_partial_update_project_properties_not_nested res = app.patch_json_api(url_public, { 'data': { 'id': project_public._id, 'type': 'nodes', 'title': title_new, } }, auth=user.auth, expect_errors=True) assert res.status_code == 400 def test_partial_update_private_project_logged_in_contributor( self, app, user, title_new, description, category, project_private, url_private): with assert_latest_log(NodeLog.EDITED_TITLE, project_private): res = app.patch_json_api(url_private, { 'data': { 'attributes': { 'title': title_new}, 'id': project_private._id, 'type': 'nodes', } }, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == title_new assert res.json['data']['attributes']['description'] == description assert res.json['data']['attributes']['category'] == category def test_multiple_patch_requests_with_same_category_generates_one_log( self, app, user, project_private, url_private, make_node_payload): project_private.category = 'project' project_private.save() new_category = 'data' payload = make_node_payload( project_private, attributes={'category': new_category}) original_n_logs = project_private.logs.count() res = app.patch_json_api(url_private, payload, auth=user.auth) assert res.status_code == 200 project_private.reload() assert project_private.category == new_category assert project_private.logs.count() == original_n_logs + 1 # sanity check app.patch_json_api(url_private, payload, auth=user.auth) project_private.reload() assert project_private.category == new_category assert project_private.logs.count() == original_n_logs + 1 def test_public_project_with_publicly_editable_wiki_turns_private( self, app, user, project_public, url_public, make_node_payload): wiki = project_public.get_addon('wiki') wiki.set_editing(permissions=True, auth=Auth(user=user), log=True) res = 
app.patch_json_api( url_public, make_node_payload(project_public, {'public': False}), auth=user.auth # self.user is creator/admin ) assert res.status_code == 200 @mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s') def test_set_node_private_updates_ezid( self, mock_update_ezid_metadata, app, user, project_public, url_public, make_node_payload): IdentifierFactory(referent=project_public, category='doi') res = app.patch_json_api( url_public, make_node_payload( project_public, {'public': False}), auth=user.auth) assert res.status_code == 200 project_public.reload() assert not project_public.is_public mock_update_ezid_metadata.assert_called_with( project_public._id, status='unavailable') @mock.patch('website.preprints.tasks.update_ezid_metadata_on_change') def test_set_node_with_preprint_private_updates_ezid( self, mock_update_ezid_metadata, app, user, project_public, url_public, make_node_payload): target_object = PreprintFactory(project=project_public) res = app.patch_json_api( url_public, make_node_payload( project_public, {'public': False}), auth=user.auth) assert res.status_code == 200 project_public.reload() assert not project_public.is_public mock_update_ezid_metadata.assert_called_with( target_object._id, status='unavailable') @pytest.mark.django_db class TestNodeDelete(NodeCRUDTestCase): def test_deletes_node_errors( self, app, user, user_two, project_public, project_private, url_public, url_private, url_fake): # test_deletes_public_node_logged_out res = app.delete(url_public, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_deletes_public_node_fails_if_unauthorized res = app.delete_json_api( url_public, auth=user_two.auth, expect_errors=True) project_public.reload() assert res.status_code == 403 assert project_public.is_deleted is False assert 'detail' in res.json['errors'][0] # test_deletes_private_node_logged_out res = app.delete(url_private, expect_errors=True) assert res.status_code == 
401 assert 'detail' in res.json['errors'][0] # test_deletes_private_node_logged_in_non_contributor res = app.delete(url_private, auth=user_two.auth, expect_errors=True) project_private.reload() assert res.status_code == 403 assert project_private.is_deleted is False assert 'detail' in res.json['errors'][0] # test_deletes_invalid_node res = app.delete(url_fake, auth=user.auth, expect_errors=True) assert res.status_code == 404 assert 'detail' in res.json['errors'][0] def test_deletes_private_node_logged_in_read_only_contributor( self, app, user_two, project_private, url_private): project_private.add_contributor( user_two, permissions=[permissions.READ]) project_private.save() res = app.delete(url_private, auth=user_two.auth, expect_errors=True) project_private.reload() assert res.status_code == 403 assert project_private.is_deleted is False assert 'detail' in res.json['errors'][0] def test_delete_project_with_component_returns_error(self, app, user): project = ProjectFactory(creator=user) NodeFactory(parent=project, creator=user) # Return a 400 because component must be deleted before deleting the # parent res = app.delete_json_api( '/{}nodes/{}/'.format(API_BASE, project._id), auth=user.auth, expect_errors=True ) assert res.status_code == 400 errors = res.json['errors'] assert len(errors) == 1 assert ( errors[0]['detail'] == 'Any child components must be deleted prior to deleting this project.') def test_delete_bookmark_collection_returns_error(self, app, user): bookmark_collection = find_bookmark_collection(user) res = app.delete_json_api( '/{}nodes/{}/'.format(API_BASE, bookmark_collection._id), auth=user.auth, expect_errors=True ) # Bookmark collections are collections, so a 404 is returned assert res.status_code == 404 @mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s') def test_delete_node_with_preprint_calls_preprint_update_status( self, mock_update_ezid_metadata_on_change, app, user, project_public, url_public): 
PreprintFactory(project=project_public) app.delete_json_api(url_public, auth=user.auth, expect_errors=True) project_public.reload() assert mock_update_ezid_metadata_on_change.called @mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s') def test_delete_node_with_identifier_calls_preprint_update_status( self, mock_update_ezid_metadata_on_change, app, user, project_public, url_public): IdentifierFactory(referent=project_public, category='doi') app.delete_json_api(url_public, auth=user.auth, expect_errors=True) project_public.reload() assert mock_update_ezid_metadata_on_change.called def test_deletes_public_node_succeeds_as_owner( self, app, user, project_public, url_public): with assert_latest_log(NodeLog.PROJECT_DELETED, project_public): res = app.delete_json_api( url_public, auth=user.auth, expect_errors=True) project_public.reload() assert res.status_code == 204 assert project_public.is_deleted is True def test_requesting_deleted_returns_410( self, app, project_public, url_public): project_public.is_deleted = True project_public.save() res = app.get(url_public, expect_errors=True) assert res.status_code == 410 assert 'detail' in res.json['errors'][0] def test_deletes_private_node_logged_in_contributor( self, app, user, project_private, url_private): with assert_latest_log(NodeLog.PROJECT_DELETED, project_private): res = app.delete(url_private, auth=user.auth, expect_errors=True) project_private.reload() assert res.status_code == 204 assert project_private.is_deleted is True @pytest.mark.django_db class TestReturnDeletedNode: @pytest.fixture() def project_public_deleted(self, user): return ProjectFactory( is_deleted=True, creator=user, title='This public project has been deleted', category='project', is_public=True ) @pytest.fixture() def project_private_deleted(self, user): return ProjectFactory( is_deleted=True, creator=user, title='This private project has been deleted', category='project', is_public=False ) @pytest.fixture() def 
title_new(self): return 'This deleted node has been edited' @pytest.fixture() def url_project_public_deleted(self, project_public_deleted): return '/{}nodes/{}/'.format(API_BASE, project_public_deleted._id) @pytest.fixture() def url_project_private_deleted(self, project_private_deleted): return '/{}nodes/{}/'.format(API_BASE, project_private_deleted._id) def test_return_deleted_node( self, app, user, title_new, project_public_deleted, project_private_deleted, url_project_public_deleted, url_project_private_deleted): # test_return_deleted_public_node res = app.get(url_project_public_deleted, expect_errors=True) assert res.status_code == 410 # test_return_deleted_private_node res = app.get( url_project_private_deleted, auth=user.auth, expect_errors=True) assert res.status_code == 410 # test_edit_deleted_public_node res = app.put_json_api( url_project_public_deleted, params={ 'title': title_new, 'node_id': project_public_deleted._id, 'category': project_public_deleted.category }, auth=user.auth, expect_errors=True) assert res.status_code == 410 # test_edit_deleted_private_node res = app.put_json_api( url_project_private_deleted, params={ 'title': title_new, 'node_id': project_private_deleted._id, 'category': project_private_deleted.category }, auth=user.auth, expect_errors=True) assert res.status_code == 410 # test_delete_deleted_public_node res = app.delete( url_project_public_deleted, auth=user.auth, expect_errors=True) assert res.status_code == 410 # test_delete_deleted_private_node res = app.delete( url_project_private_deleted, auth=user.auth, expect_errors=True) assert res.status_code == 410 @pytest.mark.django_db class TestNodeTags: @pytest.fixture() def user_admin(self): return AuthUserFactory() @pytest.fixture() def user_non_contrib(self): return AuthUserFactory() @pytest.fixture() def user_read_contrib(self): return AuthUserFactory() @pytest.fixture() def project_public(self, user, user_admin): project_public = ProjectFactory( title='Project One', 
is_public=True, creator=user) project_public.add_contributor( user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True) project_public.add_contributor( user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_public @pytest.fixture() def project_private(self, user, user_admin): project_private = ProjectFactory( title='Project Two', is_public=False, creator=user) project_private.add_contributor( user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True) project_private.add_contributor( user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_private @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/'.format(API_BASE, project_private._id) @pytest.fixture() def payload_public(self, project_public): return { 'data': { 'id': project_public._id, 'type': 'nodes', 'attributes': { 'tags': ['new-tag'] } } } @pytest.fixture() def payload_private(self, project_private): return { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': { 'tags': ['new-tag'] } } } def test_public_project_starts_with_no_tags(self, app, url_public): res = app.get(url_public) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 0 def test_node_detail_does_not_expose_system_tags( self, app, project_public, url_public): project_public.add_system_tag('systag', save=True) res = app.get(url_public) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 0 def test_contributor_can_add_tag_to_public_project( self, app, user, project_public, payload_public, url_public): with assert_latest_log(NodeLog.TAG_ADDED, project_public): res = app.patch_json_api( url_public, payload_public, auth=user.auth, expect_errors=True) assert res.status_code == 200 # Ensure data is correct from the PATCH response assert 
len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'new-tag' # Ensure data is correct in the database project_public.reload() assert project_public.tags.count() == 1 assert project_public.tags.first()._id == 'new-tag' # Ensure data is correct when GETting the resource again reload_res = app.get(url_public) assert len(reload_res.json['data']['attributes']['tags']) == 1 assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag' def test_contributor_can_add_tag_to_private_project( self, app, user, project_private, payload_private, url_private): with assert_latest_log(NodeLog.TAG_ADDED, project_private): res = app.patch_json_api( url_private, payload_private, auth=user.auth) assert res.status_code == 200 # Ensure data is correct from the PATCH response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'new-tag' # Ensure data is correct in the database project_private.reload() assert project_private.tags.count() == 1 assert project_private.tags.first()._id == 'new-tag' # Ensure data is correct when GETting the resource again reload_res = app.get(url_private, auth=user.auth) assert len(reload_res.json['data']['attributes']['tags']) == 1 assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag' def test_partial_update_project_does_not_clear_tags( self, app, user_admin, project_private, payload_private, url_private): res = app.patch_json_api( url_private, payload_private, auth=user_admin.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 1 new_payload = { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': { 'public': True } } } res = app.patch_json_api( url_private, new_payload, auth=user_admin.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 1 new_payload['data']['attributes']['public'] = False res = app.patch_json_api( url_private, new_payload, 
auth=user_admin.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 1 def test_add_tag_to_project_errors( self, app, user_non_contrib, user_read_contrib, payload_public, payload_private, url_public, url_private): # test_non_authenticated_user_cannot_add_tag_to_public_project res = app.patch_json_api( url_public, payload_public, expect_errors=True, auth=None) assert res.status_code == 401 # test_non_authenticated_user_cannot_add_tag_to_private_project res = app.patch_json_api( url_private, payload_private, expect_errors=True, auth=None) assert res.status_code == 401 # test_non_contributor_cannot_add_tag_to_public_project res = app.patch_json_api( url_public, payload_public, expect_errors=True, auth=user_non_contrib.auth) assert res.status_code == 403 # test_non_contributor_cannot_add_tag_to_private_project res = app.patch_json_api( url_private, payload_private, expect_errors=True, auth=user_non_contrib.auth) assert res.status_code == 403 # test_read_only_contributor_cannot_add_tag_to_public_project res = app.patch_json_api( url_public, payload_public, expect_errors=True, auth=user_read_contrib.auth) assert res.status_code == 403 # test_read_only_contributor_cannot_add_tag_to_private_project res = app.patch_json_api( url_private, payload_private, expect_errors=True, auth=user_read_contrib.auth) assert res.status_code == 403 def test_tags_add_and_remove_properly( self, app, user, project_private, payload_private, url_private): with assert_latest_log(NodeLog.TAG_ADDED, project_private): res = app.patch_json_api( url_private, payload_private, auth=user.auth) assert res.status_code == 200 # Ensure adding tag data is correct from the PATCH response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'new-tag' with assert_latest_log(NodeLog.TAG_REMOVED, project_private), assert_latest_log(NodeLog.TAG_ADDED, project_private, 1): # Ensure removing and adding tag data is correct from 
the PATCH # response res = app.patch_json_api( url_private, { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': {'tags': ['newer-tag']} } }, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'newer-tag' with assert_latest_log(NodeLog.TAG_REMOVED, project_private): # Ensure removing tag data is correct from the PATCH response res = app.patch_json_api( url_private, { 'data': { 'id': project_private._id, 'type': 'nodes', 'attributes': {'tags': []} } }, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 0 def test_tags_post_object_instead_of_list(self, user, app): url = '/{}nodes/'.format(API_BASE) payload = {'data': { 'type': 'nodes', 'attributes': { 'title': 'new title', 'category': 'project', 'tags': {'foo': 'bar'} } }} res = app.post_json_api( url, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".' def test_tags_patch_object_instead_of_list( self, app, user, payload_public, url_public): payload_public['data']['attributes']['tags'] = {'foo': 'bar'} res = app.patch_json_api( url_public, payload_public, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".' 
@pytest.mark.django_db
class TestNodeLicense:
    """Tests that GET /v2/nodes/{id}/ exposes a node's license.

    Checks both the serialized ``node_license`` attribute (year) and the
    ``license`` relationship URL, for public and private projects, and that
    a component with no license of its own reports its parent's license.
    """

    # --- user fixtures -------------------------------------------------
    @pytest.fixture()
    def user_admin(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()

    # --- license fixtures ----------------------------------------------
    @pytest.fixture()
    def license_name(self):
        return 'MIT License'

    @pytest.fixture()
    def node_license(self, license_name):
        # Looks the license up from the preloaded NodeLicense table.
        return NodeLicense.objects.filter(name=license_name).first()

    @pytest.fixture()
    def year(self):
        return '2105'

    @pytest.fixture()
    def copyright_holders(self):
        return ['Foo', 'Bar']

    # --- project fixtures: both carry a NodeLicenseRecord ---------------
    @pytest.fixture()
    def project_public(
            self, user, user_admin, node_license, year, copyright_holders):
        project_public = ProjectFactory(
            title='Project One', is_public=True, creator=user)
        project_public.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_public.add_contributor(
            user,
            permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        project_public.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_public.save()
        return project_public

    @pytest.fixture()
    def project_private(
            self, user, user_admin, node_license, year, copyright_holders):
        project_private = ProjectFactory(
            title='Project Two', is_public=False, creator=user)
        project_private.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_private.add_contributor(
            user,
            permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        project_private.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_private.save()
        return project_private

    @pytest.fixture()
    def url_public(self, project_public):
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)

    @pytest.fixture()
    def url_private(self, project_private):
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)

    def test_node_has(
            self, app, user, node_license, project_public,
            project_private, url_private, url_public):
        """License attribute and relationship appear for public/private nodes."""

        # test_public_node_has_node_license
        res = app.get(url_public)
        assert project_public.node_license.year == res.json[
            'data']['attributes']['node_license']['year']

        # test_public_node_has_license_relationship
        res = app.get(url_public)
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url

        # test_private_node_has_node_license
        res = app.get(url_private, auth=user.auth)
        assert project_private.node_license.year == res.json[
            'data']['attributes']['node_license']['year']

        # test_private_node_has_license_relationship
        res = app.get(url_private, auth=user.auth)
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url

    def test_component_return_parent_license_if_no_license(
            self, app, user, node_license, project_public):
        """A child node without its own license serializes the parent's."""
        node = NodeFactory(parent=project_public, creator=user)
        node.save()
        node_url = '/{}nodes/{}/'.format(API_BASE, node._id)
        res = app.get(node_url, auth=user.auth)
        # The component itself has no license record...
        assert not node.node_license
        # ...but the API reports the parent project's license year/URL.
        assert project_public.node_license.year == \
            res.json['data']['attributes']['node_license']['year']
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        assert expected_license_url in actual_license_url


@pytest.mark.django_db
class TestNodeUpdateLicense:
    """Tests for updating a node's license via PATCH /v2/nodes/{id}/.

    Covers permission enforcement (only admins may change the license),
    partial updates of year / copyright holders / license relationship,
    required-field validation, and NodeLog bookkeeping.
    """

    # --- contributors with each permission level -----------------------
    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_write_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_non_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def node(self, user_admin_contrib, user_write_contrib, user_read_contrib):
        # Node with an admin creator, a default (write) contributor, and a
        # read-only contributor.
        node = NodeFactory(creator=user_admin_contrib)
        node.add_contributor(user_write_contrib, auth=Auth(user_admin_contrib))
        node.add_contributor(
            user_read_contrib,
            auth=Auth(user_admin_contrib),
            permissions=['read'])
        node.save()
        return node

    # --- licenses from the preloaded NodeLicense table ------------------
    @pytest.fixture()
    def license_cc0(self):
        return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()

    @pytest.fixture()
    def license_mit(self):
        return NodeLicense.objects.filter(name='MIT License').first()

    @pytest.fixture()
    def license_no(self):
        # 'No license' requires year and copyrightHolders to be supplied.
        return NodeLicense.objects.get(name='No license')

    @pytest.fixture()
    def url_node(self, node):
        return '/{}nodes/{}/'.format(API_BASE, node._id)

    @pytest.fixture()
    def make_payload(self):
        # Builds a JSON-API PATCH body; the license relationship block is
        # only included when a license_id is given.
        def payload(
                node_id, license_id=None, license_year=None,
                copyright_holders=None):
            attributes = {}

            if license_year and copyright_holders:
                attributes = {
                    'node_license': {
                        'year': license_year,
                        'copyright_holders': copyright_holders
                    }
                }
            elif license_year:
                attributes = {
                    'node_license': {
                        'year': license_year
                    }
                }
            elif copyright_holders:
                attributes = {
                    'node_license': {
                        'copyright_holders': copyright_holders
                    }
                }

            return {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes,
                    'relationships': {
                        'license': {
                            'data': {
                                'type': 'licenses',
                                'id': license_id
                            }
                        }
                    }
                }
            } if license_id else {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes
                }
            }
        return payload

    @pytest.fixture()
    def make_request(self, app):
        # Small wrapper so each test issues the PATCH the same way.
        def request(url, data, auth=None, expect_errors=False):
            return app.patch_json_api(
                url, data, auth=auth, expect_errors=expect_errors)
        return request

    def test_admin_update_license_with_invalid_id(
            self, user_admin_contrib, node, make_payload,
            make_request, url_node):
        """An unknown license id yields 404 and leaves the node unlicensed."""
        data = make_payload(
            node_id=node._id,
            license_id='thisisafakelicenseid'
        )

        assert node.node_license is None

        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 404
        assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'

        node.reload()
        assert node.node_license is None

    def test_admin_can_update_license(
            self, user_admin_contrib, node,
            make_payload, make_request,
            license_cc0, url_node):
        """Setting a relationship-only license leaves year/holders empty."""
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        assert node.node_license is None

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()

        assert node.node_license.node_license == license_cc0
        assert node.node_license.year is None
        assert node.node_license.copyright_holders == []

    def test_admin_can_update_license_record(
            self, user_admin_contrib, node,
            make_payload, make_request,
            license_no, url_node):
        """Setting license plus year and holders persists all three."""
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )

        assert node.node_license is None

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']

    def test_cannot_update(
            self, user_write_contrib, user_read_contrib,
            user_non_contrib, node, make_payload,
            make_request, license_cc0, url_node):
        """Only admins may change the license: everyone else gets 403/401."""

        # def test_rw_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(
            url_node, data,
            auth=user_write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

        # def test_read_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(
            url_node, data,
            auth=user_read_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

        # def test_non_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(
            url_node, data,
            auth=user_non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

        # def test_unauthenticated_user_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(url_node, data, expect_errors=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail

    def test_update_node_with_existing_license_year_attribute_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        """Patching only the year keeps license and holders unchanged."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            license_year='2015'
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

    def test_update_node_with_existing_license_copyright_holders_attribute_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        """Patching only the holders keeps license and year unchanged."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']

    def test_update_node_with_existing_license_relationship_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_cc0, license_no, url_node):
        """Patching only the license relationship keeps year/holders."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

    def test_update_node_with_existing_license_relationship_and_attributes(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, license_cc0, url_node):
        """Patching relationship plus attributes replaces all three fields."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
            save=True
        )

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']

    def test_update_node_license_without_required_year_in_payload(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        """'No license' requires a year; omitting it is a 400."""
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            copyright_holders=['Rick', 'Morty']
        )

        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'year must be specified for this license'

    def test_update_node_license_without_required_copyright_holders_in_payload_(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        """'No license' requires copyrightHolders; omitting them is a 400."""
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='1994'
        )

        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'

    def test_update_node_license_adds_log(
            self, user_admin_contrib, node, make_payload,
            make_request, license_cc0, url_node):
        """A real license change appends a 'license_changed' log entry."""
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        logs_before_update = node.logs.count()

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        logs_after_update = node.logs.count()

        assert logs_before_update != logs_after_update
        assert node.logs.latest().action == 'license_changed'

    def test_update_node_license_without_change_does_not_add_log(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        """Re-submitting the same license (holders reordered) logs nothing."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2015',
                'copyrightHolders': ['Kim', 'Kanye']
            },
            auth=Auth(user_admin_contrib),
            save=True
        )

        before_num_logs = node.logs.count()
        before_update_log = node.logs.latest()

        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Kanye', 'Kim']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        node.reload()

        after_num_logs = node.logs.count()
        after_update_log = node.logs.latest()

        assert res.status_code == 200
        assert before_num_logs == after_num_logs
        assert before_update_log._id == after_update_log._id
leb2dg/osf.io
api_tests/nodes/views/test_node_detail.py
Python
apache-2.0
73,296
package io.dropwizard.lifecycle.setup;

import com.codahale.metrics.InstrumentedThreadFactory;
import io.dropwizard.lifecycle.ExecutorServiceManager;
import io.dropwizard.util.Duration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nonnull;
import java.util.Formatter;
import java.util.Locale;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Fluent builder for a {@link ThreadPoolExecutor} whose threads are named from a
 * format string, instrumented with metrics, and whose lifecycle (shutdown with a
 * timeout) is managed by the given {@link LifecycleEnvironment}.
 *
 * Defaults: core pool size 0, max pool size 1, unbounded {@link LinkedBlockingQueue},
 * 60s keep-alive, 5s shutdown grace period, abort-on-rejection policy.
 */
public class ExecutorServiceBuilder {
    // Not final so tests can substitute a logger via setLog(..).
    private static Logger log = LoggerFactory.getLogger(ExecutorServiceBuilder.class);
    // Shared counter used to number threads produced by the default thread factory.
    private static final AtomicLong COUNT = new AtomicLong(0);

    private final LifecycleEnvironment environment;

    @Nonnull
    private final String nameFormat;

    private int corePoolSize;
    private int maximumPoolSize;
    private boolean allowCoreThreadTimeOut;
    private Duration keepAliveTime;
    private Duration shutdownTime;
    private BlockingQueue<Runnable> workQueue;
    private ThreadFactory threadFactory;
    private RejectedExecutionHandler handler;

    /**
     * @param environment lifecycle environment that will manage the built executor
     * @param nameFormat  a {@link String#format(String, Object...)} pattern for thread
     *                    names; also used (with the format specifier stripped) as the
     *                    metric name prefix
     * @param factory     factory used to create the pool's threads
     */
    public ExecutorServiceBuilder(LifecycleEnvironment environment, String nameFormat, ThreadFactory factory) {
        this.environment = environment;
        this.nameFormat = nameFormat;
        this.corePoolSize = 0;
        this.maximumPoolSize = 1;
        this.allowCoreThreadTimeOut = false;
        this.keepAliveTime = Duration.seconds(60);
        this.shutdownTime = Duration.seconds(5);
        this.workQueue = new LinkedBlockingQueue<>();
        this.threadFactory = factory;
        this.handler = new ThreadPoolExecutor.AbortPolicy();
    }

    public ExecutorServiceBuilder(LifecycleEnvironment environment, String nameFormat) {
        this(environment, nameFormat, buildThreadFactory(nameFormat));
    }

    /**
     * Wraps the JDK default thread factory so each new thread is renamed with
     * {@code nameFormat} applied to a monotonically increasing counter.
     * Formatting with 0 up front fails fast on an invalid format string.
     */
    private static ThreadFactory buildThreadFactory(String nameFormat) {
        ThreadFactory defaultThreadFactory = Executors.defaultThreadFactory();

        // Validate the format string
        try (Formatter fmt = new Formatter()) {
            fmt.format(Locale.ROOT, nameFormat, 0);
        }

        return r -> {
            final Thread thread = defaultThreadFactory.newThread(r);
            thread.setName(String.format(Locale.ROOT, nameFormat, COUNT.incrementAndGet()));
            return thread;
        };
    }

    public ExecutorServiceBuilder minThreads(int threads) {
        this.corePoolSize = threads;
        return this;
    }

    public ExecutorServiceBuilder maxThreads(int threads) {
        this.maximumPoolSize = threads;
        return this;
    }

    public ExecutorServiceBuilder allowCoreThreadTimeOut(boolean allowCoreThreadTimeOut) {
        this.allowCoreThreadTimeOut = allowCoreThreadTimeOut;
        return this;
    }

    public ExecutorServiceBuilder keepAliveTime(Duration time) {
        this.keepAliveTime = time;
        return this;
    }

    /** Grace period given to the executor to drain on shutdown. */
    public ExecutorServiceBuilder shutdownTime(Duration time) {
        this.shutdownTime = time;
        return this;
    }

    public ExecutorServiceBuilder workQueue(BlockingQueue<Runnable> workQueue) {
        this.workQueue = workQueue;
        return this;
    }

    public ExecutorServiceBuilder rejectedExecutionHandler(RejectedExecutionHandler handler) {
        this.handler = handler;
        return this;
    }

    public ExecutorServiceBuilder threadFactory(ThreadFactory threadFactory) {
        this.threadFactory = threadFactory;
        return this;
    }

    /**
     * Builds the executor, wraps its thread factory with metrics instrumentation,
     * and registers it with the lifecycle environment for managed shutdown.
     *
     * Logs a warning when maximumPoolSize &gt; corePoolSize with an unbounded
     * queue, since ThreadPoolExecutor never grows past the core size in that case.
     */
    public ExecutorService build() {
        if (corePoolSize != maximumPoolSize && maximumPoolSize > 1 && !isBoundedQueue()) {
            log.warn("Parameter 'maximumPoolSize' is conflicting with unbounded work queues");
        }

        final String nameWithoutFormat = getNameWithoutFormat(nameFormat);
        final ThreadFactory instrumentedThreadFactory = new InstrumentedThreadFactory(threadFactory,
            environment.getMetricRegistry(), nameWithoutFormat);

        final ThreadPoolExecutor executor = new ThreadPoolExecutor(corePoolSize, maximumPoolSize,
            keepAliveTime.getQuantity(), keepAliveTime.getUnit(), workQueue, instrumentedThreadFactory, handler);
        executor.allowCoreThreadTimeOut(allowCoreThreadTimeOut);
        environment.manage(new ExecutorServiceManager(executor, shutdownTime, nameFormat));
        return executor;
    }

    /**
     * Derives a metric-name prefix by removing the format specifier from
     * {@code nameFormat}: it formats with 0 and keeps the longest common
     * prefix (minus a trailing hyphen) and suffix shared with the raw format.
     */
    static String getNameWithoutFormat(String nameFormat) {
        final String name = String.format(Locale.ROOT, nameFormat, 0);
        return commonPrefixWithoutHyphen(name, nameFormat) + commonSuffix(name, nameFormat);
    }

    /** Longest common prefix of the two strings, dropping a trailing '-'. */
    static String commonPrefixWithoutHyphen(String name, String nameFormat) {
        final int minLength = Math.min(name.length(), nameFormat.length());
        int diffIndex;
        for (diffIndex = 0; diffIndex < minLength; diffIndex++) {
            if (name.charAt(diffIndex) != nameFormat.charAt(diffIndex)) {
                break;
            }
        }
        if (diffIndex > 0 && name.charAt(diffIndex - 1) == '-') {
            diffIndex--;
        }
        return name.substring(0, diffIndex);
    }

    /** Longest common suffix of the two strings. */
    static String commonSuffix(String name, String nameFormat) {
        int nameIndex = name.length();
        int nameFormatIndex = nameFormat.length();
        while (--nameIndex >= 0 && --nameFormatIndex >= 0) {
            if (name.charAt(nameIndex) != nameFormat.charAt(nameFormatIndex)) {
                break;
            }
        }
        return name.substring(nameIndex + 1);
    }

    // An unbounded LinkedBlockingQueue reports Integer.MAX_VALUE remaining capacity.
    private boolean isBoundedQueue() {
        return workQueue.remainingCapacity() != Integer.MAX_VALUE;
    }

    // Visible for tests: replaces the static logger.
    static synchronized void setLog(Logger newLog) {
        log = newLog;
    }
}
dropwizard/dropwizard
dropwizard-lifecycle/src/main/java/io/dropwizard/lifecycle/setup/ExecutorServiceBuilder.java
Java
apache-2.0
6,294
package com.vaadin.components.common.js;

import java.util.List;

import com.google.gwt.core.client.JavaScriptObject;
import com.google.gwt.core.client.JsArrayMixed;
import com.google.gwt.query.client.Function;
import com.google.gwt.query.client.js.JsUtils;

/**
 * Class with static utilities for @JsType
 *
 * TODO: revisit when JsInterop supports static or default methods in @JsType
 * interfaces
 *
 */
public abstract class JS {

    // This has to match with the @JsNamespace of the package-info of exported
    // components
    public static final String VAADIN_JS_NAMESPACE = "vaadin";

    /**
     * Creates a native JS value backing the given @JsType class: a JS array
     * for array-like types, otherwise a plain JS object.
     */
    @SuppressWarnings("unchecked")
    public static <T> T createJsType(Class<T> clz) {
        if (clz == JsArrayMixed.class || clz == JSArray.class) {
            return (T) JavaScriptObject.createArray();
        }
        return (T) JavaScriptObject.createObject();
    }

    /** Creates an empty native JS array typed as {@link JSArray}. */
    @SuppressWarnings("unchecked")
    public static <T> JSArray<T> createArray() {
        return createJsType(JSArray.class);
    }

    /**
     * Box a native JS array in a Java List. It does not have any performance
     * penalty because we directly change the native array of super ArrayList
     * implementation.
     */
    public static native <T> List<T> asList(JavaScriptObject o)
    /*-{
        var l = @java.util.ArrayList::new()();
        l.@java.util.ArrayList::array = o;
        return l;
    }-*/;

    /** Returns true when the value is a JS primitive (not an object). */
    public static native boolean isPrimitiveType(Object dataItem)
    /*-{
        return Object(dataItem) !== dataItem;
    }-*/;

    /**
     * Installs a JS property on {@code jso} whose reads/writes are routed
     * through the given {@link Getter}/{@link Setter}; either may be null to
     * fall back to plain stored-value behavior for that direction.
     */
    public static void definePropertyAccessors(Object jso, String propertyName,
            Setter setter, Getter getter) {
        JavaScriptObject setterJSO = setter != null ? JsUtils
                .wrapFunction(new Function() {
                    @Override
                    public void f() {
                        setter.setValue(arguments(0));
                    }
                }) : null;
        // The getter result is wrapped in a one-element array so that
        // undefined/null returned values survive the JSNI boundary.
        JavaScriptObject getterJSO = getter != null ? JsUtils
                .wrapFunction(new Function() {
                    @Override
                    public Object f(Object... args) {
                        JSArray<Object> array = JS.createArray();
                        array.push(getter.getValue());
                        return array;
                    }
                }) : null;
        definePropertyAccessors((JavaScriptObject) jso, propertyName,
                setterJSO, getterJSO);
    }

    private static native void definePropertyAccessors(
            JavaScriptObject jsObject, String propertyName,
            JavaScriptObject setter, JavaScriptObject getter)
    /*-{
        var _value = jsObject[propertyName];
        Object.defineProperty(jsObject, propertyName, {
            get: function() {
                if (getter) {
                    return getter()[0];
                }
                return _value;
            },
            set: function(value) {
                if (setter){
                    setter(value);
                }
                _value = value;
            }
        });
        if (_value !== undefined){
            jsObject[propertyName] = _value;
        }
    }-*/;

    /** Callback invoked when the JS property is assigned. */
    public interface Setter {
        void setValue(Object value);
    }

    /** Callback invoked when the JS property is read. */
    public interface Getter {
        Object getValue();
    }

    /** Invokes {@code o} as a JS function with {@code o} as `this` and one argument. */
    public static <T> T exec(Object o, Object arg) {
        return JsUtils.jsni((JavaScriptObject) o, "call", (JavaScriptObject) o,
                arg);
    }

    public static native boolean isUndefinedOrNull(Object o)
    /*-{
        return o === undefined || o === null;
    }-*/;

    public static native boolean isObject(Object o)
    /*-{
        return typeof o === "object" && o !== null;
    }-*/;

    /** Creates a native JS Error with the given message ('' when null). */
    public static native JavaScriptObject getError(String msg)
    /*-{
        return new Error(msg || '');
    }-*/;

    public static native JavaScriptObject getUndefined()
    /*-{
        return undefined;
    }-*/;
}
jforge/components
vaadin-components-gwt/src/main/java/com/vaadin/components/common/js/JS.java
Java
apache-2.0
3,892
package httpblobprovider

import (
	"crypto/x509"
	"net/http"

	"github.com/cloudfoundry/bosh-agent/settings"
	boshblob "github.com/cloudfoundry/bosh-utils/blobstore"
	boshcrypto "github.com/cloudfoundry/bosh-utils/crypto"
	boshhttp "github.com/cloudfoundry/bosh-utils/httpclient"
)

// NewBlobstoreHTTPClient builds the *http.Client used to reach the
// configured blobstore. When the settings carry a CA certificate it is
// loaded into the client's cert pool; internal blobstore types get the
// default client, everything else gets the external default client.
func NewBlobstoreHTTPClient(blobstoreSettings settings.Blobstore) (*http.Client, error) {
	var certpool *x509.CertPool
	if caCert := fetchCaCertificate(blobstoreSettings.Options); caCert != "" {
		pool, err := boshcrypto.CertPoolFromPEM([]byte(caCert))
		if err != nil {
			return nil, err
		}
		certpool = pool
	}

	if isInternalBlobstore(blobstoreSettings.Type) {
		return boshhttp.CreateDefaultClient(certpool), nil
	}
	return boshhttp.CreateExternalDefaultClient(certpool), nil
}

// isInternalBlobstore reports whether the provider is one of the
// blobstore types served from inside the deployment.
func isInternalBlobstore(provider string) bool {
	return provider == boshblob.BlobstoreTypeDummy ||
		provider == boshblob.BlobstoreTypeLocal ||
		provider == "dav"
}

// fetchCaCertificate digs options["tls"]["cert"]["ca"] out of the
// blobstore options map, returning "" whenever any level is missing or
// of the wrong type. Reading a nil map is safe in Go, so no explicit
// nil check is needed.
func fetchCaCertificate(options map[string]interface{}) string {
	tlsOpts, ok := options["tls"].(map[string]interface{})
	if !ok {
		return ""
	}

	certOpts, ok := tlsOpts["cert"].(map[string]interface{})
	if !ok {
		return ""
	}

	ca, _ := certOpts["ca"].(string)
	return ca
}
cloudfoundry/bosh-agent
agent/httpblobprovider/blobstore_http_client.go
GO
apache-2.0
1,382
/*
 * Copyright 2005 MH-Software-Entwicklung. All rights reserved.
 * Use is subject to license terms.
 */
package com.jtattoo.plaf;

import java.awt.*;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import javax.swing.*;
import javax.swing.plaf.*;
import javax.swing.plaf.basic.*;

/**
 * Progress bar UI delegate for the JTattoo look and feel. Paints both
 * determinate and indeterminate bars with a themed gradient and a 3D
 * border, and honours per-component "selectionForeground" /
 * "selectionBackground" client properties for the progress string.
 *
 * @author Michael Hagen
 */
public class BaseProgressBarUI extends BasicProgressBarUI {

    // Repaints the bar when the selection-color client properties change.
    protected PropertyChangeListener propertyChangeListener;

    public static ComponentUI createUI(JComponent c) {
        return new BaseProgressBarUI();
    }

    public void installUI(JComponent c) {
        super.installUI(c);
        c.setBorder(UIManager.getBorder("ProgressBar.border"));
        propertyChangeListener = new PropertyChangeHandler();
        c.addPropertyChangeListener(propertyChangeListener);
    }

    public void uninstallUI(JComponent c) {
        c.removePropertyChangeListener(propertyChangeListener);
        super.uninstallUI(c);
    }

    protected void installDefaults() {
        super.installDefaults();
    }

    /**
     * The "selectionForeground" is the color of the text when it is painted
     * over a filled area of the progress bar.
     */
    protected Color getSelectionForeground() {
        Object selectionForeground = progressBar.getClientProperty("selectionForeground");
        if (selectionForeground instanceof Color) {
            return (Color) selectionForeground;
        }
        return super.getSelectionForeground();
    }

    /**
     * The "selectionBackground" is the color of the text when it is painted
     * over an unfilled area of the progress bar.
     */
    protected Color getSelectionBackground() {
        Object selectionBackground = progressBar.getClientProperty("selectionBackground");
        if (selectionBackground instanceof Color) {
            return (Color) selectionBackground;
        }
        return super.getSelectionBackground();
    }

    protected void paintIndeterminate(Graphics g, JComponent c) {
        if (!(g instanceof Graphics2D)) {
            return;
        }
        Graphics2D g2D = (Graphics2D) g;

        Insets b = progressBar.getInsets(); // area for border
        int barRectWidth = progressBar.getWidth() - (b.right + b.left);
        int barRectHeight = progressBar.getHeight() - (b.top + b.bottom);

        // Gradient colors come from the theme for UIResource foregrounds,
        // otherwise they are derived from the user-set foreground color.
        Color[] colors;
        if (progressBar.getForeground() instanceof UIResource) {
            if (!JTattooUtilities.isActive(c)) {
                colors = AbstractLookAndFeel.getTheme().getInActiveColors();
            } else if (c.isEnabled()) {
                colors = AbstractLookAndFeel.getTheme().getProgressBarColors();
            } else {
                colors = AbstractLookAndFeel.getTheme().getDisabledColors();
            }
        } else {
            Color hiColor = ColorHelper.brighter(progressBar.getForeground(), 40);
            Color loColor = ColorHelper.darker(progressBar.getForeground(), 20);
            colors = ColorHelper.createColorArr(hiColor, loColor, 20);
        }
        Color cHi = ColorHelper.darker(colors[colors.length - 1], 5);
        Color cLo = ColorHelper.darker(colors[colors.length - 1], 10);

        // Paint the bouncing box.
        Rectangle boxRect = getBox(null);
        if (boxRect != null) {
            g2D.setColor(progressBar.getForeground());
            JTattooUtilities.draw3DBorder(g, cHi, cLo, boxRect.x + 1, boxRect.y + 1, boxRect.width - 2, boxRect.height - 2);
            JTattooUtilities.fillHorGradient(g, colors, boxRect.x + 2, boxRect.y + 2, boxRect.width - 4, boxRect.height - 4);
        }

        // Deal with possible text painting.
        // BUGFIX: getBox() may return null (e.g. zero-sized bar); the old
        // code dereferenced boxRect here unguarded and could throw an NPE.
        if (progressBar.isStringPainted() && boxRect != null) {
            Object savedRenderingHint = null;
            if (AbstractLookAndFeel.getTheme().isTextAntiAliasingOn()) {
                savedRenderingHint = g2D.getRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING);
                g2D.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, AbstractLookAndFeel.getTheme().getTextAntiAliasingHint());
            }
            if (progressBar.getOrientation() == JProgressBar.HORIZONTAL) {
                paintString(g2D, b.left, b.top, barRectWidth, barRectHeight, boxRect.width, b);
            } else {
                paintString(g2D, b.left, b.top, barRectWidth, barRectHeight, boxRect.height, b);
            }
            if (AbstractLookAndFeel.getTheme().isTextAntiAliasingOn()) {
                g2D.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, savedRenderingHint);
            }
        }
    }

    protected void paintDeterminate(Graphics g, JComponent c) {
        if (!(g instanceof Graphics2D)) {
            return;
        }
        Graphics2D g2D = (Graphics2D) g;

        Insets b = progressBar.getInsets(); // area for border
        int w = progressBar.getWidth() - (b.right + b.left);
        int h = progressBar.getHeight() - (b.top + b.bottom);

        // amount of progress to draw
        int amountFull = getAmountFull(b, w, h);

        Color[] colors;
        if (progressBar.getForeground() instanceof UIResource) {
            if (!JTattooUtilities.isActive(c)) {
                colors = AbstractLookAndFeel.getTheme().getInActiveColors();
            } else if (c.isEnabled()) {
                colors = AbstractLookAndFeel.getTheme().getProgressBarColors();
            } else {
                colors = AbstractLookAndFeel.getTheme().getDisabledColors();
            }
        } else {
            Color hiColor = ColorHelper.brighter(progressBar.getForeground(), 40);
            Color loColor = ColorHelper.darker(progressBar.getForeground(), 20);
            colors = ColorHelper.createColorArr(hiColor, loColor, 20);
        }
        Color cHi = ColorHelper.darker(colors[colors.length - 1], 5);
        Color cLo = ColorHelper.darker(colors[colors.length - 1], 10);

        if (progressBar.getOrientation() == JProgressBar.HORIZONTAL) {
            // Fill from the leading edge, which depends on component
            // orientation (left-to-right vs. right-to-left).
            if (JTattooUtilities.isLeftToRight(progressBar)) {
                JTattooUtilities.draw3DBorder(g, cHi, cLo, 2, 2, amountFull - 2, h - 2);
                JTattooUtilities.fillHorGradient(g, colors, 3, 3, amountFull - 4, h - 4);
            } else {
                JTattooUtilities.draw3DBorder(g, cHi, cLo, w - amountFull + 2, 2, w - 2, h - 2);
                JTattooUtilities.fillHorGradient(g, colors, w - amountFull + 3, 3, w - 4, h - 4);
            }
        } else { // VERTICAL
            JTattooUtilities.draw3DBorder(g, cHi, cLo, 2, 2, w - 2, amountFull - 2);
            JTattooUtilities.fillVerGradient(g, colors, 3, 3, w - 4, amountFull - 4);
        }

        // Deal with possible text painting
        if (progressBar.isStringPainted()) {
            Object savedRenderingHint = null;
            if (AbstractLookAndFeel.getTheme().isTextAntiAliasingOn()) {
                savedRenderingHint = g2D.getRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING);
                g2D.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, AbstractLookAndFeel.getTheme().getTextAntiAliasingHint());
            }
            paintString(g, b.left, b.top, w, h, amountFull, b);
            if (AbstractLookAndFeel.getTheme().isTextAntiAliasingOn()) {
                g2D.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, savedRenderingHint);
            }
        }
    }

    public void paint(Graphics g, JComponent c) {
        // Indeterminate mode only exists since Java 1.4; older runtimes
        // always paint the determinate form.
        if (JTattooUtilities.getJavaVersion() >= 1.4) {
            if (progressBar.isIndeterminate()) {
                paintIndeterminate(g, c);
            } else {
                paintDeterminate(g, c);
            }
        } else {
            paintDeterminate(g, c);
        }
    }

    //-----------------------------------------------------------------------------------------------
    // Repaints the bar whenever one of the selection-color client
    // properties is replaced with a Color value.
    protected class PropertyChangeHandler implements PropertyChangeListener {

        public void propertyChange(PropertyChangeEvent e) {
            if ("selectionForeground".equals(e.getPropertyName()) && (e.getNewValue() instanceof Color)) {
                progressBar.invalidate();
                progressBar.repaint();
            } else if ("selectionBackground".equals(e.getPropertyName()) && (e.getNewValue() instanceof Color)) {
                progressBar.invalidate();
                progressBar.repaint();
            }
        }
    }
}
joshuairl/toothchat-client
src/jtattoo/src/com/jtattoo/plaf/BaseProgressBarUI.java
Java
apache-2.0
8,338
/*
 *
 */
package onlinebookstore.entity;

import java.io.Serializable;

/**
 * Serializable bean carrying a bookstore user's account data: credentials,
 * contact details, role, newsletter preference and login state.
 */
public class UserInfo implements Serializable {

    // Keep the original UID so previously serialized instances still load.
    private static final long serialVersionUID = -3016286917871450522L;

    // Login state: -1 invalid, 0 new, 1 login OK.
    private int status = -1;
    private int userID = -1;
    // Role: 0 is admin (per original docs); defaults to 1.
    private int userRole = 1;
    private String username;
    private String password;
    private String address;
    private String email;
    private int newsletter;

    public UserInfo() {
        // Defaults are applied by the field initializers above.
    }

    /** @return login state: -1 invalid, 0 new, 1 login OK */
    public int getStatus() {
        return status;
    }

    /** @param status login state: -1 invalid, 0 new, 1 login OK */
    public void setStatus(int status) {
        this.status = status;
    }

    /** @return the user's database id (-1 until assigned) */
    public int getUserID() {
        return userID;
    }

    public void setUserID(int userID) {
        this.userID = userID;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    /** @return the user role; 0 is admin */
    public int getUserRole() {
        return userRole;
    }

    public void setUserRole(int userRole) {
        this.userRole = userRole;
    }

    public String getAddress() {
        return address;
    }

    public void setAddress(String address) {
        this.address = address;
    }

    public String getEmail() {
        return email;
    }

    public void setEmail(String email) {
        this.email = email;
    }

    /** @return the newsletter subscription flag */
    public int getNewsletter() {
        return newsletter;
    }

    public void setNewsletter(int newsletter) {
        this.newsletter = newsletter;
    }
}
ProjectOf714/OnlineBookstore
src/onlinebookstore/entity/UserInfo.java
Java
apache-2.0
2,529
using Microsoft.EntityFrameworkCore.Metadata.Builders;

using SSW.DataOnion.Interfaces;
using SSW.DataOnion.Sample.Entities;

namespace SSW.DataOnion.Sample.Data.Configurations
{
    /// <summary>
    /// Entity-type configuration for <c>Address</c>: keys the entity on
    /// <c>Id</c> and keeps the derived <c>FullAddress</c> property out of
    /// the mapped schema.
    /// </summary>
    public class AddressConfigurations : IEntityTypeConfiguration<Address>
    {
        public void Map(EntityTypeBuilder<Address> builder)
        {
            builder.HasKey(address => address.Id);
            builder.Ignore(address => address.FullAddress);
        }
    }
}
SSWConsulting/SSW.DataOnion2
EF7/SSW.DataOnion/sample/SSW.DataOnion.Sample.Data/Configurations/AddressConfigurations.cs
C#
apache-2.0
437
const eg = require('../../eg'); module.exports = class extends eg.Generator { constructor (args, opts) { super(args, opts); this.configureCommand({ command: 'list [options]', description: 'List scopes', builder: yargs => yargs .usage(`Usage: $0 ${process.argv[2]} list [options]`) .example(`$0 ${process.argv[2]} list`) }); } prompting () { return this.admin.scopes.list() .then(res => { if (!res.scopes || !res.scopes.length) { return this.stdout('You have no scopes'); } res.scopes.forEach(scope => this.stdout(scope)); }) .catch(err => { this.log.error(err.message); }); } };
ExpressGateway/express-gateway
bin/generators/scopes/list.js
JavaScript
apache-2.0
717
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.checkpoint;

import java.io.IOException;

import javax.annotation.Nullable;

import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.testutils.CommonTestUtils;
import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.executiongraph.ExecutionGraph;
import org.apache.flink.runtime.executiongraph.ExecutionGraphBuilder;
import org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy;
import org.apache.flink.runtime.instance.SlotProvider;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.tasks.ExternalizedCheckpointSettings;
import org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration;
import org.apache.flink.runtime.jobgraph.tasks.JobCheckpointingSettings;
import org.apache.flink.runtime.query.TaskKvStateRegistry;
import org.apache.flink.runtime.state.AbstractKeyedStateBackend;
import org.apache.flink.runtime.state.CheckpointStreamFactory;
import org.apache.flink.runtime.state.KeyGroupRange;
import org.apache.flink.runtime.state.OperatorStateBackend;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.testingUtils.TestingUtils;
import org.apache.flink.util.SerializedValue;
import org.apache.flink.util.TestLogger;

import org.junit.Test;

import java.io.Serializable;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Collections;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * This test validates that the checkpoint settings serialize correctly
 * in the presence of user-defined objects.
 */
public class CheckpointSettingsSerializableTest extends TestLogger {

	/**
	 * Builds checkpoint settings that reference an object NOT on the system
	 * classpath (both inside a master hook factory and inside a custom state
	 * backend), serializes the job graph round-trip as distributed execution
	 * would, and checks that deserialization succeeds with the user
	 * classloader.
	 */
	@Test
	public void testDeserializationOfUserCodeWithUserClassLoader() throws Exception {
		// Empty URLClassLoader stands in for a user-code classloader; the
		// payload object is only resolvable through it.
		final ClassLoader classLoader = new URLClassLoader(new URL[0], getClass().getClassLoader());
		final Serializable outOfClassPath = CommonTestUtils.createObjectForClassNotInClassPath(classLoader);

		final MasterTriggerRestoreHook.Factory[] hooks = {
				new TestFactory(outOfClassPath) };
		final SerializedValue<MasterTriggerRestoreHook.Factory[]> serHooks = new SerializedValue<>(hooks);

		final JobCheckpointingSettings checkpointingSettings = new JobCheckpointingSettings(
				Collections.<JobVertexID>emptyList(),
				Collections.<JobVertexID>emptyList(),
				Collections.<JobVertexID>emptyList(),
				new CheckpointCoordinatorConfiguration(
						1000L,
						10000L,
						0L,
						1,
						ExternalizedCheckpointSettings.none(),
						true),
				new SerializedValue<StateBackend>(new CustomStateBackend(outOfClassPath)),
				serHooks);

		final JobGraph jobGraph = new JobGraph(new JobID(), "test job");
		jobGraph.setSnapshotSettings(checkpointingSettings);

		// to serialize/deserialize the job graph to see if the behavior is correct under
		// distributed execution
		final JobGraph copy = CommonTestUtils.createCopySerializable(jobGraph);

		final ExecutionGraph eg = ExecutionGraphBuilder.buildGraph(
				null,
				copy,
				new Configuration(),
				TestingUtils.defaultExecutor(),
				TestingUtils.defaultExecutor(),
				mock(SlotProvider.class),
				classLoader,
				new StandaloneCheckpointRecoveryFactory(),
				Time.seconds(10),
				new NoRestartStrategy(),
				new UnregisteredMetricsGroup(),
				10,
				log);

		// The user-defined master hook must have been registered, and the
		// custom state backend must deserialize with the user classloader.
		assertEquals(1, eg.getCheckpointCoordinator().getNumberOfRegisteredMasterHooks());
		assertTrue(jobGraph.getCheckpointingSettings().getDefaultStateBackend().deserializeValue(classLoader) instanceof CustomStateBackend);
	}

	// ------------------------------------------------------------------------

	/**
	 * Hook factory whose serialized form drags along a payload object that
	 * lives outside the system classpath; the created hook is a mock.
	 */
	private static final class TestFactory implements MasterTriggerRestoreHook.Factory {

		private static final long serialVersionUID = -612969579110202607L;

		private final Serializable payload;

		TestFactory(Serializable payload) {
			this.payload = payload;
		}

		@SuppressWarnings("unchecked")
		@Override
		public <V> MasterTriggerRestoreHook<V> create() {
			MasterTriggerRestoreHook<V> hook = mock(MasterTriggerRestoreHook.class);
			when(hook.getIdentifier()).thenReturn("id");
			return hook;
		}
	}

	/**
	 * Minimal StateBackend implementation (all factory methods return null)
	 * used only to verify that its serialized custom option survives the
	 * round trip.
	 */
	private static final class CustomStateBackend implements StateBackend {

		private static final long serialVersionUID = -6107964383429395816L;

		/**
		 * Simulate a custom option that is not in the normal classpath.
		 */
		private Serializable customOption;

		public CustomStateBackend(Serializable customOption) {
			this.customOption = customOption;
		}

		@Override
		public CheckpointStreamFactory createStreamFactory(
			JobID jobId, String operatorIdentifier) throws IOException {
			return null;
		}

		@Override
		public CheckpointStreamFactory createSavepointStreamFactory(
			JobID jobId,
			String operatorIdentifier,
			@Nullable String targetLocation) throws IOException {
			return null;
		}

		@Override
		public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
			Environment env,
			JobID jobID,
			String operatorIdentifier,
			TypeSerializer<K> keySerializer,
			int numberOfKeyGroups,
			KeyGroupRange keyGroupRange,
			TaskKvStateRegistry kvStateRegistry) throws Exception {
			return null;
		}

		@Override
		public OperatorStateBackend createOperatorStateBackend(
			Environment env, String operatorIdentifier) throws Exception {
			return null;
		}
	}
}
PangZhi/flink
flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointSettingsSerializableTest.java
Java
apache-2.0
6,521
package csdn.shimiso.eim.model;

import java.io.Serializable;
import java.util.Date;

import csdn.shimiso.eim.comm.Constant;
import csdn.shimiso.eim.util.DateUtil;

/**
 * Message/notice entity: a friend request, system message or chat message
 * delivered to a user, ordered newest-first by {@link #compareTo}.
 *
 * @author shimiso
 */
public class Notice implements Serializable, Comparable<Notice> {

	private static final long serialVersionUID = 1L;

	public static final int ADD_FRIEND = 1;// friend request
	public static final int SYS_MSG = 2; // system message
	public static final int CHAT_MSG = 3;// chat message

	public static final int READ = 0;
	public static final int UNREAD = 1;
	public static final int All = 2;

	private String id; // primary key
	private String title; // title
	private String content; // body content
	private Integer status; // state: 0 = read, 1 = unread
	private String from; // sender of the notice
	private String to; // recipient of the notice
	private String noticeTime; // timestamp string of the notice
	private Integer noticeType; // message type: 1 = friend request, 2 = system message

	public Integer getNoticeType() {
		return noticeType;
	}

	public void setNoticeType(Integer noticeType) {
		this.noticeType = noticeType;
	}

	public String getId() {
		return id;
	}

	public void setId(String id) {
		this.id = id;
	}

	public String getTitle() {
		return title;
	}

	public void setTitle(String title) {
		this.title = title;
	}

	public String getContent() {
		return content;
	}

	public void setContent(String content) {
		this.content = content;
	}

	public Integer getStatus() {
		return status;
	}

	public void setStatus(Integer status) {
		this.status = status;
	}

	public String getFrom() {
		return from;
	}

	public void setFrom(String from) {
		this.from = from;
	}

	public String getTo() {
		return to;
	}

	public void setTo(String to) {
		this.to = to;
	}

	public String getNoticeTime() {
		return noticeTime;
	}

	public void setNoticeTime(String noticeTime) {
		this.noticeTime = noticeTime;
	}

	/**
	 * Orders notices newest-first: returns 1 when this notice is older than
	 * {@code oth}, -1 when newer, 0 when equal or when either timestamp is
	 * missing.
	 */
	@Override
	public int compareTo(Notice oth) {
		if (null == this.getNoticeTime() || null == oth.getNoticeTime()) {
			return 0;
		}
		String format = null;
		String time1 = "";
		String time2 = "";
		// Assumes 23-character timestamps carry milliseconds and use
		// Constant.MS_FORMART — TODO confirm against the writers of noticeTime.
		if (this.getNoticeTime().length() == oth.getNoticeTime().length()
				&& this.getNoticeTime().length() == 23) {
			time1 = this.getNoticeTime();
			time2 = oth.getNoticeTime();
			format = Constant.MS_FORMART;
		} else {
			// NOTE(review): throws StringIndexOutOfBoundsException for
			// timestamps shorter than 19 chars, and passes a null format to
			// DateUtil.str2Date below — verify that helper handles null.
			time1 = this.getNoticeTime().substring(0, 19);
			time2 = oth.getNoticeTime().substring(0, 19);
		}
		Date da1 = DateUtil.str2Date(time1, format);
		Date da2 = DateUtil.str2Date(time2, format);
		if (da1.before(da2)) {
			return 1;
		}
		if (da2.before(da1)) {
			return -1;
		}
		return 0;
	}
}
ice-coffee/EIM
src/csdn/shimiso/eim/model/Notice.java
Java
apache-2.0
2,569